diff --git a/fonts/KaTeX_AMS-Regular.woff2 b/fonts/KaTeX_AMS-Regular.woff2
deleted file mode 100644
index 0acaaff03..000000000
Binary files a/fonts/KaTeX_AMS-Regular.woff2 and /dev/null differ
diff --git a/fonts/KaTeX_Caligraphic-Bold.woff2 b/fonts/KaTeX_Caligraphic-Bold.woff2
deleted file mode 100644
index f390922ec..000000000
Binary files a/fonts/KaTeX_Caligraphic-Bold.woff2 and /dev/null differ
diff --git a/fonts/KaTeX_Caligraphic-Regular.woff2 b/fonts/KaTeX_Caligraphic-Regular.woff2
deleted file mode 100644
index 75344a1f9..000000000
Binary files a/fonts/KaTeX_Caligraphic-Regular.woff2 and /dev/null differ
diff --git a/fonts/KaTeX_Fraktur-Bold.woff2 b/fonts/KaTeX_Fraktur-Bold.woff2
deleted file mode 100644
index 395f28bea..000000000
Binary files a/fonts/KaTeX_Fraktur-Bold.woff2 and /dev/null differ
diff --git a/fonts/KaTeX_Fraktur-Regular.woff2 b/fonts/KaTeX_Fraktur-Regular.woff2
deleted file mode 100644
index 735f6948d..000000000
Binary files a/fonts/KaTeX_Fraktur-Regular.woff2 and /dev/null differ
diff --git a/fonts/KaTeX_Main-Bold.woff2 b/fonts/KaTeX_Main-Bold.woff2
deleted file mode 100644
index ab2ad21da..000000000
Binary files a/fonts/KaTeX_Main-Bold.woff2 and /dev/null differ
diff --git a/fonts/KaTeX_Main-BoldItalic.woff2 b/fonts/KaTeX_Main-BoldItalic.woff2
deleted file mode 100644
index 5931794de..000000000
Binary files a/fonts/KaTeX_Main-BoldItalic.woff2 and /dev/null differ
diff --git a/fonts/KaTeX_Main-Italic.woff2 b/fonts/KaTeX_Main-Italic.woff2
deleted file mode 100644
index b50920e13..000000000
Binary files a/fonts/KaTeX_Main-Italic.woff2 and /dev/null differ
diff --git a/fonts/KaTeX_Main-Regular.woff2 b/fonts/KaTeX_Main-Regular.woff2
deleted file mode 100644
index eb24a7ba2..000000000
Binary files a/fonts/KaTeX_Main-Regular.woff2 and /dev/null differ
diff --git a/fonts/KaTeX_Math-BoldItalic.woff2 b/fonts/KaTeX_Math-BoldItalic.woff2
deleted file mode 100644
index 29657023a..000000000
Binary files a/fonts/KaTeX_Math-BoldItalic.woff2 and /dev/null differ
diff --git a/fonts/KaTeX_Math-Italic.woff2 b/fonts/KaTeX_Math-Italic.woff2
deleted file mode 100644
index 215c143fd..000000000
Binary files a/fonts/KaTeX_Math-Italic.woff2 and /dev/null differ
diff --git a/fonts/KaTeX_SansSerif-Bold.woff2 b/fonts/KaTeX_SansSerif-Bold.woff2
deleted file mode 100644
index cfaa3bda5..000000000
Binary files a/fonts/KaTeX_SansSerif-Bold.woff2 and /dev/null differ
diff --git a/fonts/KaTeX_SansSerif-Italic.woff2 b/fonts/KaTeX_SansSerif-Italic.woff2
deleted file mode 100644
index 349c06dc6..000000000
Binary files a/fonts/KaTeX_SansSerif-Italic.woff2 and /dev/null differ
diff --git a/fonts/KaTeX_SansSerif-Regular.woff2 b/fonts/KaTeX_SansSerif-Regular.woff2
deleted file mode 100644
index a90eea85f..000000000
Binary files a/fonts/KaTeX_SansSerif-Regular.woff2 and /dev/null differ
diff --git a/fonts/KaTeX_Script-Regular.woff2 b/fonts/KaTeX_Script-Regular.woff2
deleted file mode 100644
index b3048fc11..000000000
Binary files a/fonts/KaTeX_Script-Regular.woff2 and /dev/null differ
diff --git a/fonts/KaTeX_Size1-Regular.woff2 b/fonts/KaTeX_Size1-Regular.woff2
deleted file mode 100644
index c5a8462fb..000000000
Binary files a/fonts/KaTeX_Size1-Regular.woff2 and /dev/null differ
diff --git a/fonts/KaTeX_Size2-Regular.woff2 b/fonts/KaTeX_Size2-Regular.woff2
deleted file mode 100644
index e1bccfe24..000000000
Binary files a/fonts/KaTeX_Size2-Regular.woff2 and /dev/null differ
diff --git a/fonts/KaTeX_Size3-Regular.woff2 b/fonts/KaTeX_Size3-Regular.woff2
deleted file mode 100644
index 249a28662..000000000
Binary files a/fonts/KaTeX_Size3-Regular.woff2 and /dev/null differ
diff --git a/fonts/KaTeX_Size4-Regular.woff2 b/fonts/KaTeX_Size4-Regular.woff2
deleted file mode 100644
index 680c13085..000000000
Binary files a/fonts/KaTeX_Size4-Regular.woff2 and /dev/null differ
diff --git a/fonts/KaTeX_Typewriter-Regular.woff2 b/fonts/KaTeX_Typewriter-Regular.woff2
deleted file mode 100644
index 771f1af70..000000000
Binary files a/fonts/KaTeX_Typewriter-Regular.woff2 and /dev/null differ
diff --git a/katex.min.css b/katex.min.css
deleted file mode 100644
index 5f1f85765..000000000
--- a/katex.min.css
+++ /dev/null
@@ -1 +0,0 @@
-@font-face{font-family:KaTeX_AMS;font-style:normal;font-weight:400;src:url(fonts/KaTeX_AMS-Regular.woff2) format("woff2")}@font-face{font-family:KaTeX_Caligraphic;font-style:normal;font-weight:700;src:url(fonts/KaTeX_Caligraphic-Bold.woff2) format("woff2")}@font-face{font-family:KaTeX_Caligraphic;font-style:normal;font-weight:400;src:url(fonts/KaTeX_Caligraphic-Regular.woff2) format("woff2")}@font-face{font-family:KaTeX_Fraktur;font-style:normal;font-weight:700;src:url(fonts/KaTeX_Fraktur-Bold.woff2) format("woff2")}@font-face{font-family:KaTeX_Fraktur;font-style:normal;font-weight:400;src:url(fonts/KaTeX_Fraktur-Regular.woff2) format("woff2")}@font-face{font-family:KaTeX_Main;font-style:normal;font-weight:700;src:url(fonts/KaTeX_Main-Bold.woff2) format("woff2")}@font-face{font-family:KaTeX_Main;font-style:italic;font-weight:700;src:url(fonts/KaTeX_Main-BoldItalic.woff2) format("woff2")}@font-face{font-family:KaTeX_Main;font-style:italic;font-weight:400;src:url(fonts/KaTeX_Main-Italic.woff2) format("woff2")}@font-face{font-family:KaTeX_Main;font-style:normal;font-weight:400;src:url(fonts/KaTeX_Main-Regular.woff2) format("woff2")}@font-face{font-family:KaTeX_Math;font-style:italic;font-weight:700;src:url(fonts/KaTeX_Math-BoldItalic.woff2) format("woff2")}@font-face{font-family:KaTeX_Math;font-style:italic;font-weight:400;src:url(fonts/KaTeX_Math-Italic.woff2) format("woff2")}@font-face{font-family:"KaTeX_SansSerif";font-style:normal;font-weight:700;src:url(fonts/KaTeX_SansSerif-Bold.woff2) format("woff2")}@font-face{font-family:"KaTeX_SansSerif";font-style:italic;font-weight:400;src:url(fonts/KaTeX_SansSerif-Italic.woff2) format("woff2")}@font-face{font-family:"KaTeX_SansSerif";font-style:normal;font-weight:400;src:url(fonts/KaTeX_SansSerif-Regular.woff2) format("woff2")}@font-face{font-family:KaTeX_Script;font-style:normal;font-weight:400;src:url(fonts/KaTeX_Script-Regular.woff2) format("woff2")}@font-face{font-family:KaTeX_Size1;font-style:normal;font-weight:400;src:url(fonts/KaTeX_Size1-Regular.woff2) format("woff2")}@font-face{font-family:KaTeX_Size2;font-style:normal;font-weight:400;src:url(fonts/KaTeX_Size2-Regular.woff2) format("woff2")}@font-face{font-family:KaTeX_Size3;font-style:normal;font-weight:400;src:url(fonts/KaTeX_Size3-Regular.woff2) format("woff2")}@font-face{font-family:KaTeX_Size4;font-style:normal;font-weight:400;src:url(fonts/KaTeX_Size4-Regular.woff2) format("woff2")}@font-face{font-family:KaTeX_Typewriter;font-style:normal;font-weight:400;src:url(fonts/KaTeX_Typewriter-Regular.woff2) format("woff2")}.katex{text-rendering:auto;font:normal 1.21em KaTeX_Main,Times New Roman,serif;line-height:1.2;text-indent:0}.katex *{-ms-high-contrast-adjust:none!important;border-color:currentColor}.katex .katex-version:after{content:"0.15.2"}.katex .katex-mathml{clip:rect(1px,1px,1px,1px);border:0;height:1px;overflow:hidden;padding:0;position:absolute;width:1px}.katex
.katex-html>.newline{display:block}.katex .base{position:relative;white-space:nowrap;width:-webkit-min-content;width:-moz-min-content;width:min-content}.katex .base,.katex .strut{display:inline-block}.katex .textbf{font-weight:700}.katex .textit{font-style:italic}.katex .textrm{font-family:KaTeX_Main}.katex .textsf{font-family:KaTeX_SansSerif}.katex .texttt{font-family:KaTeX_Typewriter}.katex .mathnormal{font-family:KaTeX_Math;font-style:italic}.katex .mathit{font-family:KaTeX_Main;font-style:italic}.katex .mathrm{font-style:normal}.katex .mathbf{font-family:KaTeX_Main;font-weight:700}.katex .boldsymbol{font-family:KaTeX_Math;font-style:italic;font-weight:700}.katex .amsrm,.katex .mathbb,.katex .textbb{font-family:KaTeX_AMS}.katex .mathcal{font-family:KaTeX_Caligraphic}.katex .mathfrak,.katex .textfrak{font-family:KaTeX_Fraktur}.katex .mathtt{font-family:KaTeX_Typewriter}.katex .mathscr,.katex .textscr{font-family:KaTeX_Script}.katex .mathsf,.katex .textsf{font-family:KaTeX_SansSerif}.katex .mathboldsf,.katex .textboldsf{font-family:KaTeX_SansSerif;font-weight:700}.katex .mathitsf,.katex .textitsf{font-family:KaTeX_SansSerif;font-style:italic}.katex .mainrm{font-family:KaTeX_Main;font-style:normal}.katex .vlist-t{border-collapse:collapse;display:inline-table;table-layout:fixed}.katex .vlist-r{display:table-row}.katex .vlist{display:table-cell;position:relative;vertical-align:bottom}.katex .vlist>span{display:block;height:0;position:relative}.katex .vlist>span>span{display:inline-block}.katex .vlist>span>.pstrut{overflow:hidden;width:0}.katex .vlist-t2{margin-right:-2px}.katex .vlist-s{display:table-cell;font-size:1px;min-width:2px;vertical-align:bottom;width:2px}.katex .vbox{align-items:baseline;display:inline-flex;flex-direction:column}.katex .hbox{width:100%}.katex .hbox,.katex .thinbox{display:inline-flex;flex-direction:row}.katex .thinbox{max-width:0;width:0}.katex .msupsub{text-align:left}.katex .mfrac>span>span{text-align:center}.katex .mfrac .frac-line{border-bottom-style:solid;display:inline-block;width:100%}.katex .hdashline,.katex .hline,.katex .mfrac .frac-line,.katex .overline .overline-line,.katex .rule,.katex .underline .underline-line{min-height:1px}.katex .mspace{display:inline-block}.katex .clap,.katex .llap,.katex .rlap{position:relative;width:0}.katex .clap>.inner,.katex .llap>.inner,.katex .rlap>.inner{position:absolute}.katex .clap>.fix,.katex .llap>.fix,.katex .rlap>.fix{display:inline-block}.katex .llap>.inner{right:0}.katex .clap>.inner,.katex .rlap>.inner{left:0}.katex .clap>.inner>span{margin-left:-50%;margin-right:50%}.katex .rule{border:0 solid;display:inline-block;position:relative}.katex .hline,.katex .overline .overline-line,.katex .underline .underline-line{border-bottom-style:solid;display:inline-block;width:100%}.katex .hdashline{border-bottom-style:dashed;display:inline-block;width:100%}.katex .sqrt>.root{margin-left:.27777778em;margin-right:-.55555556em}.katex .fontsize-ensurer.reset-size1.size1,.katex .sizing.reset-size1.size1{font-size:1em}.katex .fontsize-ensurer.reset-size1.size2,.katex .sizing.reset-size1.size2{font-size:1.2em}.katex .fontsize-ensurer.reset-size1.size3,.katex .sizing.reset-size1.size3{font-size:1.4em}.katex .fontsize-ensurer.reset-size1.size4,.katex .sizing.reset-size1.size4{font-size:1.6em}.katex .fontsize-ensurer.reset-size1.size5,.katex .sizing.reset-size1.size5{font-size:1.8em}.katex .fontsize-ensurer.reset-size1.size6,.katex .sizing.reset-size1.size6{font-size:2em}.katex .fontsize-ensurer.reset-size1.size7,.katex 
.sizing.reset-size1.size7{font-size:2.4em}.katex .fontsize-ensurer.reset-size1.size8,.katex .sizing.reset-size1.size8{font-size:2.88em}.katex .fontsize-ensurer.reset-size1.size9,.katex .sizing.reset-size1.size9{font-size:3.456em}.katex .fontsize-ensurer.reset-size1.size10,.katex .sizing.reset-size1.size10{font-size:4.148em}.katex .fontsize-ensurer.reset-size1.size11,.katex .sizing.reset-size1.size11{font-size:4.976em}.katex .fontsize-ensurer.reset-size2.size1,.katex .sizing.reset-size2.size1{font-size:.83333333em}.katex .fontsize-ensurer.reset-size2.size2,.katex .sizing.reset-size2.size2{font-size:1em}.katex .fontsize-ensurer.reset-size2.size3,.katex .sizing.reset-size2.size3{font-size:1.16666667em}.katex .fontsize-ensurer.reset-size2.size4,.katex .sizing.reset-size2.size4{font-size:1.33333333em}.katex .fontsize-ensurer.reset-size2.size5,.katex .sizing.reset-size2.size5{font-size:1.5em}.katex .fontsize-ensurer.reset-size2.size6,.katex .sizing.reset-size2.size6{font-size:1.66666667em}.katex .fontsize-ensurer.reset-size2.size7,.katex .sizing.reset-size2.size7{font-size:2em}.katex .fontsize-ensurer.reset-size2.size8,.katex .sizing.reset-size2.size8{font-size:2.4em}.katex .fontsize-ensurer.reset-size2.size9,.katex .sizing.reset-size2.size9{font-size:2.88em}.katex .fontsize-ensurer.reset-size2.size10,.katex .sizing.reset-size2.size10{font-size:3.45666667em}.katex .fontsize-ensurer.reset-size2.size11,.katex .sizing.reset-size2.size11{font-size:4.14666667em}.katex .fontsize-ensurer.reset-size3.size1,.katex .sizing.reset-size3.size1{font-size:.71428571em}.katex .fontsize-ensurer.reset-size3.size2,.katex .sizing.reset-size3.size2{font-size:.85714286em}.katex .fontsize-ensurer.reset-size3.size3,.katex .sizing.reset-size3.size3{font-size:1em}.katex .fontsize-ensurer.reset-size3.size4,.katex .sizing.reset-size3.size4{font-size:1.14285714em}.katex .fontsize-ensurer.reset-size3.size5,.katex .sizing.reset-size3.size5{font-size:1.28571429em}.katex .fontsize-ensurer.reset-size3.size6,.katex .sizing.reset-size3.size6{font-size:1.42857143em}.katex .fontsize-ensurer.reset-size3.size7,.katex .sizing.reset-size3.size7{font-size:1.71428571em}.katex .fontsize-ensurer.reset-size3.size8,.katex .sizing.reset-size3.size8{font-size:2.05714286em}.katex .fontsize-ensurer.reset-size3.size9,.katex .sizing.reset-size3.size9{font-size:2.46857143em}.katex .fontsize-ensurer.reset-size3.size10,.katex .sizing.reset-size3.size10{font-size:2.96285714em}.katex .fontsize-ensurer.reset-size3.size11,.katex .sizing.reset-size3.size11{font-size:3.55428571em}.katex .fontsize-ensurer.reset-size4.size1,.katex .sizing.reset-size4.size1{font-size:.625em}.katex .fontsize-ensurer.reset-size4.size2,.katex .sizing.reset-size4.size2{font-size:.75em}.katex .fontsize-ensurer.reset-size4.size3,.katex .sizing.reset-size4.size3{font-size:.875em}.katex .fontsize-ensurer.reset-size4.size4,.katex .sizing.reset-size4.size4{font-size:1em}.katex .fontsize-ensurer.reset-size4.size5,.katex .sizing.reset-size4.size5{font-size:1.125em}.katex .fontsize-ensurer.reset-size4.size6,.katex .sizing.reset-size4.size6{font-size:1.25em}.katex .fontsize-ensurer.reset-size4.size7,.katex .sizing.reset-size4.size7{font-size:1.5em}.katex .fontsize-ensurer.reset-size4.size8,.katex .sizing.reset-size4.size8{font-size:1.8em}.katex .fontsize-ensurer.reset-size4.size9,.katex .sizing.reset-size4.size9{font-size:2.16em}.katex .fontsize-ensurer.reset-size4.size10,.katex .sizing.reset-size4.size10{font-size:2.5925em}.katex .fontsize-ensurer.reset-size4.size11,.katex 
.sizing.reset-size4.size11{font-size:3.11em}.katex .fontsize-ensurer.reset-size5.size1,.katex .sizing.reset-size5.size1{font-size:.55555556em}.katex .fontsize-ensurer.reset-size5.size2,.katex .sizing.reset-size5.size2{font-size:.66666667em}.katex .fontsize-ensurer.reset-size5.size3,.katex .sizing.reset-size5.size3{font-size:.77777778em}.katex .fontsize-ensurer.reset-size5.size4,.katex .sizing.reset-size5.size4{font-size:.88888889em}.katex .fontsize-ensurer.reset-size5.size5,.katex .sizing.reset-size5.size5{font-size:1em}.katex .fontsize-ensurer.reset-size5.size6,.katex .sizing.reset-size5.size6{font-size:1.11111111em}.katex .fontsize-ensurer.reset-size5.size7,.katex .sizing.reset-size5.size7{font-size:1.33333333em}.katex .fontsize-ensurer.reset-size5.size8,.katex .sizing.reset-size5.size8{font-size:1.6em}.katex .fontsize-ensurer.reset-size5.size9,.katex .sizing.reset-size5.size9{font-size:1.92em}.katex .fontsize-ensurer.reset-size5.size10,.katex .sizing.reset-size5.size10{font-size:2.30444444em}.katex .fontsize-ensurer.reset-size5.size11,.katex .sizing.reset-size5.size11{font-size:2.76444444em}.katex .fontsize-ensurer.reset-size6.size1,.katex .sizing.reset-size6.size1{font-size:.5em}.katex .fontsize-ensurer.reset-size6.size2,.katex .sizing.reset-size6.size2{font-size:.6em}.katex .fontsize-ensurer.reset-size6.size3,.katex .sizing.reset-size6.size3{font-size:.7em}.katex .fontsize-ensurer.reset-size6.size4,.katex .sizing.reset-size6.size4{font-size:.8em}.katex .fontsize-ensurer.reset-size6.size5,.katex .sizing.reset-size6.size5{font-size:.9em}.katex .fontsize-ensurer.reset-size6.size6,.katex .sizing.reset-size6.size6{font-size:1em}.katex .fontsize-ensurer.reset-size6.size7,.katex .sizing.reset-size6.size7{font-size:1.2em}.katex .fontsize-ensurer.reset-size6.size8,.katex .sizing.reset-size6.size8{font-size:1.44em}.katex .fontsize-ensurer.reset-size6.size9,.katex .sizing.reset-size6.size9{font-size:1.728em}.katex .fontsize-ensurer.reset-size6.size10,.katex .sizing.reset-size6.size10{font-size:2.074em}.katex .fontsize-ensurer.reset-size6.size11,.katex .sizing.reset-size6.size11{font-size:2.488em}.katex .fontsize-ensurer.reset-size7.size1,.katex .sizing.reset-size7.size1{font-size:.41666667em}.katex .fontsize-ensurer.reset-size7.size2,.katex .sizing.reset-size7.size2{font-size:.5em}.katex .fontsize-ensurer.reset-size7.size3,.katex .sizing.reset-size7.size3{font-size:.58333333em}.katex .fontsize-ensurer.reset-size7.size4,.katex .sizing.reset-size7.size4{font-size:.66666667em}.katex .fontsize-ensurer.reset-size7.size5,.katex .sizing.reset-size7.size5{font-size:.75em}.katex .fontsize-ensurer.reset-size7.size6,.katex .sizing.reset-size7.size6{font-size:.83333333em}.katex .fontsize-ensurer.reset-size7.size7,.katex .sizing.reset-size7.size7{font-size:1em}.katex .fontsize-ensurer.reset-size7.size8,.katex .sizing.reset-size7.size8{font-size:1.2em}.katex .fontsize-ensurer.reset-size7.size9,.katex .sizing.reset-size7.size9{font-size:1.44em}.katex .fontsize-ensurer.reset-size7.size10,.katex .sizing.reset-size7.size10{font-size:1.72833333em}.katex .fontsize-ensurer.reset-size7.size11,.katex .sizing.reset-size7.size11{font-size:2.07333333em}.katex .fontsize-ensurer.reset-size8.size1,.katex .sizing.reset-size8.size1{font-size:.34722222em}.katex .fontsize-ensurer.reset-size8.size2,.katex .sizing.reset-size8.size2{font-size:.41666667em}.katex .fontsize-ensurer.reset-size8.size3,.katex .sizing.reset-size8.size3{font-size:.48611111em}.katex .fontsize-ensurer.reset-size8.size4,.katex 
.sizing.reset-size8.size4{font-size:.55555556em}.katex .fontsize-ensurer.reset-size8.size5,.katex .sizing.reset-size8.size5{font-size:.625em}.katex .fontsize-ensurer.reset-size8.size6,.katex .sizing.reset-size8.size6{font-size:.69444444em}.katex .fontsize-ensurer.reset-size8.size7,.katex .sizing.reset-size8.size7{font-size:.83333333em}.katex .fontsize-ensurer.reset-size8.size8,.katex .sizing.reset-size8.size8{font-size:1em}.katex .fontsize-ensurer.reset-size8.size9,.katex .sizing.reset-size8.size9{font-size:1.2em}.katex .fontsize-ensurer.reset-size8.size10,.katex .sizing.reset-size8.size10{font-size:1.44027778em}.katex .fontsize-ensurer.reset-size8.size11,.katex .sizing.reset-size8.size11{font-size:1.72777778em}.katex .fontsize-ensurer.reset-size9.size1,.katex .sizing.reset-size9.size1{font-size:.28935185em}.katex .fontsize-ensurer.reset-size9.size2,.katex .sizing.reset-size9.size2{font-size:.34722222em}.katex .fontsize-ensurer.reset-size9.size3,.katex .sizing.reset-size9.size3{font-size:.40509259em}.katex .fontsize-ensurer.reset-size9.size4,.katex .sizing.reset-size9.size4{font-size:.46296296em}.katex .fontsize-ensurer.reset-size9.size5,.katex .sizing.reset-size9.size5{font-size:.52083333em}.katex .fontsize-ensurer.reset-size9.size6,.katex .sizing.reset-size9.size6{font-size:.5787037em}.katex .fontsize-ensurer.reset-size9.size7,.katex .sizing.reset-size9.size7{font-size:.69444444em}.katex .fontsize-ensurer.reset-size9.size8,.katex .sizing.reset-size9.size8{font-size:.83333333em}.katex .fontsize-ensurer.reset-size9.size9,.katex .sizing.reset-size9.size9{font-size:1em}.katex .fontsize-ensurer.reset-size9.size10,.katex .sizing.reset-size9.size10{font-size:1.20023148em}.katex .fontsize-ensurer.reset-size9.size11,.katex .sizing.reset-size9.size11{font-size:1.43981481em}.katex .fontsize-ensurer.reset-size10.size1,.katex .sizing.reset-size10.size1{font-size:.24108004em}.katex .fontsize-ensurer.reset-size10.size2,.katex .sizing.reset-size10.size2{font-size:.28929605em}.katex .fontsize-ensurer.reset-size10.size3,.katex .sizing.reset-size10.size3{font-size:.33751205em}.katex .fontsize-ensurer.reset-size10.size4,.katex .sizing.reset-size10.size4{font-size:.38572806em}.katex .fontsize-ensurer.reset-size10.size5,.katex .sizing.reset-size10.size5{font-size:.43394407em}.katex .fontsize-ensurer.reset-size10.size6,.katex .sizing.reset-size10.size6{font-size:.48216008em}.katex .fontsize-ensurer.reset-size10.size7,.katex .sizing.reset-size10.size7{font-size:.57859209em}.katex .fontsize-ensurer.reset-size10.size8,.katex .sizing.reset-size10.size8{font-size:.69431051em}.katex .fontsize-ensurer.reset-size10.size9,.katex .sizing.reset-size10.size9{font-size:.83317261em}.katex .fontsize-ensurer.reset-size10.size10,.katex .sizing.reset-size10.size10{font-size:1em}.katex .fontsize-ensurer.reset-size10.size11,.katex .sizing.reset-size10.size11{font-size:1.19961427em}.katex .fontsize-ensurer.reset-size11.size1,.katex .sizing.reset-size11.size1{font-size:.20096463em}.katex .fontsize-ensurer.reset-size11.size2,.katex .sizing.reset-size11.size2{font-size:.24115756em}.katex .fontsize-ensurer.reset-size11.size3,.katex .sizing.reset-size11.size3{font-size:.28135048em}.katex .fontsize-ensurer.reset-size11.size4,.katex .sizing.reset-size11.size4{font-size:.32154341em}.katex .fontsize-ensurer.reset-size11.size5,.katex .sizing.reset-size11.size5{font-size:.36173633em}.katex .fontsize-ensurer.reset-size11.size6,.katex .sizing.reset-size11.size6{font-size:.40192926em}.katex .fontsize-ensurer.reset-size11.size7,.katex 
.sizing.reset-size11.size7{font-size:.48231511em}.katex .fontsize-ensurer.reset-size11.size8,.katex .sizing.reset-size11.size8{font-size:.57877814em}.katex .fontsize-ensurer.reset-size11.size9,.katex .sizing.reset-size11.size9{font-size:.69453376em}.katex .fontsize-ensurer.reset-size11.size10,.katex .sizing.reset-size11.size10{font-size:.83360129em}.katex .fontsize-ensurer.reset-size11.size11,.katex .sizing.reset-size11.size11{font-size:1em}.katex .delimsizing.size1{font-family:KaTeX_Size1}.katex .delimsizing.size2{font-family:KaTeX_Size2}.katex .delimsizing.size3{font-family:KaTeX_Size3}.katex .delimsizing.size4{font-family:KaTeX_Size4}.katex .delimsizing.mult .delim-size1>span{font-family:KaTeX_Size1}.katex .delimsizing.mult .delim-size4>span{font-family:KaTeX_Size4}.katex .nulldelimiter{display:inline-block;width:.12em}.katex .delimcenter,.katex .op-symbol{position:relative}.katex .op-symbol.small-op{font-family:KaTeX_Size1}.katex .op-symbol.large-op{font-family:KaTeX_Size2}.katex .accent>.vlist-t,.katex .op-limits>.vlist-t{text-align:center}.katex .accent .accent-body{position:relative}.katex .accent .accent-body:not(.accent-full){width:0}.katex .overlay{display:block}.katex .mtable .vertical-separator{display:inline-block;min-width:1px}.katex .mtable .arraycolsep{display:inline-block}.katex .mtable .col-align-c>.vlist-t{text-align:center}.katex .mtable .col-align-l>.vlist-t{text-align:left}.katex .mtable .col-align-r>.vlist-t{text-align:right}.katex .svg-align{text-align:left}.katex svg{fill:currentColor;stroke:currentColor;fill-rule:nonzero;fill-opacity:1;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;display:block;height:inherit;position:absolute;width:100%}.katex svg path{stroke:none}.katex img{border-style:none;max-height:none;max-width:none;min-height:0;min-width:0}.katex .stretchy{display:block;overflow:hidden;position:relative;width:100%}.katex .stretchy:after,.katex .stretchy:before{content:""}.katex .hide-tail{overflow:hidden;position:relative;width:100%}.katex .halfarrow-left{left:0;overflow:hidden;position:absolute;width:50.2%}.katex .halfarrow-right{overflow:hidden;position:absolute;right:0;width:50.2%}.katex .brace-left{left:0;overflow:hidden;position:absolute;width:25.1%}.katex .brace-center{left:25%;overflow:hidden;position:absolute;width:50%}.katex .brace-right{overflow:hidden;position:absolute;right:0;width:25.1%}.katex .x-arrow-pad{padding:0 .5em}.katex .cd-arrow-pad{padding:0 .55556em 0 .27778em}.katex .mover,.katex .munder,.katex .x-arrow{text-align:center}.katex .boxpad{padding:0 .3em}.katex .fbox,.katex .fcolorbox{border:.04em solid;box-sizing:border-box}.katex .cancel-pad{padding:0 .2em}.katex .cancel-lap{margin-left:-.2em;margin-right:-.2em}.katex .sout{border-bottom-style:solid;border-bottom-width:.08em}.katex .angl{border-right:.049em solid;border-top:.049em solid;box-sizing:border-box;margin-right:.03889em}.katex .anglpad{padding:0 .03889em}.katex .eqn-num:before{content:"(" counter(katexEqnNo) ")";counter-increment:katexEqnNo}.katex .mml-eqn-num:before{content:"(" counter(mmlEqnNo) ")";counter-increment:mmlEqnNo}.katex .mtr-glue{width:50%}.katex .cd-vert-arrow{display:inline-block;position:relative}.katex .cd-label-left{display:inline-block;position:absolute;right:calc(50% + .3em);text-align:left}.katex .cd-label-right{display:inline-block;left:calc(50% + .3em);position:absolute;text-align:right}.katex-display{display:block;margin:1em 
0;text-align:center}.katex-display>.katex{display:block;text-align:center;white-space:nowrap}.katex-display>.katex>.katex-html{display:block;position:relative}.katex-display>.katex>.katex-html>.tag{position:absolute;right:0}.katex-display.leqno>.katex>.katex-html>.tag{left:0;right:auto}.katex-display.fleqn>.katex{padding-left:2em;text-align:left}body{counter-reset:katexEqnNo mmlEqnNo} diff --git a/katex.min.js b/katex.min.js deleted file mode 100644 index e4d78f243..000000000 --- a/katex.min.js +++ /dev/null @@ -1 +0,0 @@ -!function(e,t){"object"==typeof exports&&"object"==typeof module?module.exports=t():"function"==typeof define&&define.amd?define([],t):"object"==typeof exports?exports.katex=t():e.katex=t()}("undefined"!=typeof self?self:this,(function(){return function(){"use strict";var e={d:function(t,r){for(var n in r)e.o(r,n)&&!e.o(t,n)&&Object.defineProperty(t,n,{enumerable:!0,get:r[n]})},o:function(e,t){return Object.prototype.hasOwnProperty.call(e,t)}},t={};e.d(t,{default:function(){return Zn}});var r=function e(t,r){this.position=void 0;var n,a="KaTeX parse error: "+t,i=r&&r.loc;if(i&&i.start<=i.end){var o=i.lexer.input;n=i.start;var s=i.end;n===o.length?a+=" at end of input: ":a+=" at position "+(n+1)+": ";var l=o.slice(n,s).replace(/[^]/g,"$&\u0332");a+=(n>15?"\u2026"+o.slice(n-15,n):o.slice(0,n))+l+(s+15":">","<":"<",'"':""","'":"'"},o=/[&><"']/g;var s=function e(t){return"ordgroup"===t.type||"color"===t.type?1===t.body.length?e(t.body[0]):t:"font"===t.type?e(t.body):t},l={contains:function(e,t){return-1!==e.indexOf(t)},deflt:function(e,t){return void 0===e?t:e},escape:function(e){return String(e).replace(o,(function(e){return i[e]}))},hyphenate:function(e){return e.replace(a,"-$1").toLowerCase()},getBaseElem:s,isCharacterBox:function(e){var t=s(e);return"mathord"===t.type||"textord"===t.type||"atom"===t.type},protocolFromUrl:function(e){var t=/^\s*([^\\/#]*?)(?::|�*58|�*3a)/i.exec(e);return null!=t?t[1]:"_relative"}},h={displayMode:{type:"boolean",description:"Render math in display mode, which puts the math in display style (so \\int and \\sum are large, for example), and centers the math on the page on its own line.",cli:"-d, --display-mode"},output:{type:{enum:["htmlAndMathml","html","mathml"]},description:"Determines the markup language of the output.",cli:"-F, --format "},leqno:{type:"boolean",description:"Render display math in leqno style (left-justified tags)."},fleqn:{type:"boolean",description:"Render display math flush left."},throwOnError:{type:"boolean",default:!0,cli:"-t, --no-throw-on-error",cliDescription:"Render errors (in the color given by --error-color) instead of throwing a ParseError exception when encountering an error."},errorColor:{type:"string",default:"#cc0000",cli:"-c, --error-color ",cliDescription:"A color string given in the format 'rgb' or 'rrggbb' (no #). 
This option determines the color of errors rendered by the -t option.",cliProcessor:function(e){return"#"+e}},macros:{type:"object",cli:"-m, --macro ",cliDescription:"Define custom macro of the form '\\foo:expansion' (use multiple -m arguments for multiple macros).",cliDefault:[],cliProcessor:function(e,t){return t.push(e),t}},minRuleThickness:{type:"number",description:"Specifies a minimum thickness, in ems, for fraction lines, `\\sqrt` top lines, `{array}` vertical lines, `\\hline`, `\\hdashline`, `\\underline`, `\\overline`, and the borders of `\\fbox`, `\\boxed`, and `\\fcolorbox`.",processor:function(e){return Math.max(0,e)},cli:"--min-rule-thickness ",cliProcessor:parseFloat},colorIsTextColor:{type:"boolean",description:"Makes \\color behave like LaTeX's 2-argument \\textcolor, instead of LaTeX's one-argument \\color mode change.",cli:"-b, --color-is-text-color"},strict:{type:[{enum:["warn","ignore","error"]},"boolean","function"],description:"Turn on strict / LaTeX faithfulness mode, which throws an error if the input uses features that are not supported by LaTeX.",cli:"-S, --strict",cliDefault:!1},trust:{type:["boolean","function"],description:"Trust the input, enabling all HTML features such as \\url.",cli:"-T, --trust"},maxSize:{type:"number",default:1/0,description:"If non-zero, all user-specified sizes, e.g. in \\rule{500em}{500em}, will be capped to maxSize ems. Otherwise, elements and spaces can be arbitrarily large",processor:function(e){return Math.max(0,e)},cli:"-s, --max-size ",cliProcessor:parseInt},maxExpand:{type:"number",default:1e3,description:"Limit the number of macro expansions to the specified number, to prevent e.g. infinite macro loops. If set to Infinity, the macro expander will try to fully expand as in LaTeX.",processor:function(e){return Math.max(0,e)},cli:"-e, --max-expand ",cliProcessor:function(e){return"Infinity"===e?1/0:parseInt(e)}},globalGroup:{type:"boolean",cli:!1}};function m(e){if(e.default)return e.default;var t=e.type,r=Array.isArray(t)?t[0]:t;if("string"!=typeof r)return r.enum[0];switch(r){case"boolean":return!1;case"string":return"";case"number":return 0;case"object":return{}}}var c=function(){function e(e){for(var t in this.displayMode=void 0,this.output=void 0,this.leqno=void 0,this.fleqn=void 0,this.throwOnError=void 0,this.errorColor=void 0,this.macros=void 0,this.minRuleThickness=void 0,this.colorIsTextColor=void 0,this.strict=void 0,this.trust=void 0,this.maxSize=void 0,this.maxExpand=void 0,this.globalGroup=void 0,e=e||{},h)if(h.hasOwnProperty(t)){var r=h[t];this[t]=void 0!==e[t]?r.processor?r.processor(e[t]):e[t]:m(r)}}var t=e.prototype;return t.reportNonstrict=function(e,t,r){var a=this.strict;if("function"==typeof a&&(a=a(e,t,r)),a&&"ignore"!==a){if(!0===a||"error"===a)throw new n("LaTeX-incompatible input and strict mode is set to 'error': "+t+" ["+e+"]",r);"warn"===a?"undefined"!=typeof console&&console.warn("LaTeX-incompatible input and strict mode is set to 'warn': "+t+" ["+e+"]"):"undefined"!=typeof console&&console.warn("LaTeX-incompatible input and strict mode is set to unrecognized '"+a+"': "+t+" ["+e+"]")}},t.useStrictBehavior=function(e,t,r){var n=this.strict;if("function"==typeof n)try{n=n(e,t,r)}catch(e){n="error"}return!(!n||"ignore"===n)&&(!0===n||"error"===n||("warn"===n?("undefined"!=typeof console&&console.warn("LaTeX-incompatible input and strict mode is set to 'warn': "+t+" ["+e+"]"),!1):("undefined"!=typeof console&&console.warn("LaTeX-incompatible input and strict mode is set to unrecognized '"+n+"': "+t+" 
["+e+"]"),!1)))},t.isTrusted=function(e){e.url&&!e.protocol&&(e.protocol=l.protocolFromUrl(e.url));var t="function"==typeof this.trust?this.trust(e):this.trust;return Boolean(t)},e}(),u=function(){function e(e,t,r){this.id=void 0,this.size=void 0,this.cramped=void 0,this.id=e,this.size=t,this.cramped=r}var t=e.prototype;return t.sup=function(){return p[d[this.id]]},t.sub=function(){return p[f[this.id]]},t.fracNum=function(){return p[g[this.id]]},t.fracDen=function(){return p[v[this.id]]},t.cramp=function(){return p[b[this.id]]},t.text=function(){return p[y[this.id]]},t.isTight=function(){return this.size>=2},e}(),p=[new u(0,0,!1),new u(1,0,!0),new u(2,1,!1),new u(3,1,!0),new u(4,2,!1),new u(5,2,!0),new u(6,3,!1),new u(7,3,!0)],d=[4,5,4,5,6,7,6,7],f=[5,5,5,5,7,7,7,7],g=[2,3,4,5,6,7,6,7],v=[3,3,5,5,7,7,7,7],b=[1,1,3,3,5,5,7,7],y=[0,1,2,3,2,3,2,3],x={DISPLAY:p[0],TEXT:p[2],SCRIPT:p[4],SCRIPTSCRIPT:p[6]},w=[{name:"latin",blocks:[[256,591],[768,879]]},{name:"cyrillic",blocks:[[1024,1279]]},{name:"armenian",blocks:[[1328,1423]]},{name:"brahmic",blocks:[[2304,4255]]},{name:"georgian",blocks:[[4256,4351]]},{name:"cjk",blocks:[[12288,12543],[19968,40879],[65280,65376]]},{name:"hangul",blocks:[[44032,55215]]}];var k=[];function S(e){for(var t=0;t=k[t]&&e<=k[t+1])return!0;return!1}w.forEach((function(e){return e.blocks.forEach((function(e){return k.push.apply(k,e)}))}));var M=80,z={doubleleftarrow:"M262 157\nl10-10c34-36 62.7-77 86-123 3.3-8 5-13.3 5-16 0-5.3-6.7-8-20-8-7.3\n 0-12.2.5-14.5 1.5-2.3 1-4.8 4.5-7.5 10.5-49.3 97.3-121.7 169.3-217 216-28\n 14-57.3 25-88 33-6.7 2-11 3.8-13 5.5-2 1.7-3 4.2-3 7.5s1 5.8 3 7.5\nc2 1.7 6.3 3.5 13 5.5 68 17.3 128.2 47.8 180.5 91.5 52.3 43.7 93.8 96.2 124.5\n 157.5 9.3 8 15.3 12.3 18 13h6c12-.7 18-4 18-10 0-2-1.7-7-5-15-23.3-46-52-87\n-86-123l-10-10h399738v-40H218c328 0 0 0 0 0l-10-8c-26.7-20-65.7-43-117-69 2.7\n-2 6-3.7 10-5 36.7-16 72.3-37.3 107-64l10-8h399782v-40z\nm8 0v40h399730v-40zm0 194v40h399730v-40z",doublerightarrow:"M399738 392l\n-10 10c-34 36-62.7 77-86 123-3.3 8-5 13.3-5 16 0 5.3 6.7 8 20 8 7.3 0 12.2-.5\n 14.5-1.5 2.3-1 4.8-4.5 7.5-10.5 49.3-97.3 121.7-169.3 217-216 28-14 57.3-25 88\n-33 6.7-2 11-3.8 13-5.5 2-1.7 3-4.2 3-7.5s-1-5.8-3-7.5c-2-1.7-6.3-3.5-13-5.5-68\n-17.3-128.2-47.8-180.5-91.5-52.3-43.7-93.8-96.2-124.5-157.5-9.3-8-15.3-12.3-18\n-13h-6c-12 .7-18 4-18 10 0 2 1.7 7 5 15 23.3 46 52 87 86 123l10 10H0v40h399782\nc-328 0 0 0 0 0l10 8c26.7 20 65.7 43 117 69-2.7 2-6 3.7-10 5-36.7 16-72.3 37.3\n-107 64l-10 8H0v40zM0 157v40h399730v-40zm0 194v40h399730v-40z",leftarrow:"M400000 241H110l3-3c68.7-52.7 113.7-120\n 135-202 4-14.7 6-23 6-25 0-7.3-7-11-21-11-8 0-13.2.8-15.5 2.5-2.3 1.7-4.2 5.8\n-5.5 12.5-1.3 4.7-2.7 10.3-4 17-12 48.7-34.8 92-68.5 130S65.3 228.3 18 247\nc-10 4-16 7.7-18 11 0 8.7 6 14.3 18 17 47.3 18.7 87.8 47 121.5 85S196 441.3 208\n 490c.7 2 1.3 5 2 9s1.2 6.7 1.5 8c.3 1.3 1 3.3 2 6s2.2 4.5 3.5 5.5c1.3 1 3.3\n 1.8 6 2.5s6 1 10 1c14 0 21-3.7 21-11 0-2-2-10.3-6-25-20-79.3-65-146.7-135-202\n l-3-3h399890zM100 241v40h399900v-40z",leftbrace:"M6 548l-6-6v-35l6-11c56-104 135.3-181.3 238-232 57.3-28.7 117\n-45 179-50h399577v120H403c-43.3 7-81 15-113 26-100.7 33-179.7 91-237 174-2.7\n 5-6 9-10 13-.7 1-7.3 1-20 1H6z",leftbraceunder:"M0 6l6-6h17c12.688 0 19.313.3 20 1 4 4 7.313 8.3 10 13\n 35.313 51.3 80.813 93.8 136.5 127.5 55.688 33.7 117.188 55.8 184.5 66.5.688\n 0 2 .3 4 1 18.688 2.7 76 4.3 172 5h399450v120H429l-6-1c-124.688-8-235-61.7\n-331-161C60.687 138.7 32.312 99.3 7 54L0 41V6z",leftgroup:"M400000 80\nH435C64 80 168.3 229.4 21 260c-5.9 
1.2-18 0-18 0-2 0-3-1-3-3v-38C76 61 257 0\n 435 0h399565z",leftgroupunder:"M400000 262\nH435C64 262 168.3 112.6 21 82c-5.9-1.2-18 0-18 0-2 0-3 1-3 3v38c76 158 257 219\n 435 219h399565z",leftharpoon:"M0 267c.7 5.3 3 10 7 14h399993v-40H93c3.3\n-3.3 10.2-9.5 20.5-18.5s17.8-15.8 22.5-20.5c50.7-52 88-110.3 112-175 4-11.3 5\n-18.3 3-21-1.3-4-7.3-6-18-6-8 0-13 .7-15 2s-4.7 6.7-8 16c-42 98.7-107.3 174.7\n-196 228-6.7 4.7-10.7 8-12 10-1.3 2-2 5.7-2 11zm100-26v40h399900v-40z",leftharpoonplus:"M0 267c.7 5.3 3 10 7 14h399993v-40H93c3.3-3.3 10.2-9.5\n 20.5-18.5s17.8-15.8 22.5-20.5c50.7-52 88-110.3 112-175 4-11.3 5-18.3 3-21-1.3\n-4-7.3-6-18-6-8 0-13 .7-15 2s-4.7 6.7-8 16c-42 98.7-107.3 174.7-196 228-6.7 4.7\n-10.7 8-12 10-1.3 2-2 5.7-2 11zm100-26v40h399900v-40zM0 435v40h400000v-40z\nm0 0v40h400000v-40z",leftharpoondown:"M7 241c-4 4-6.333 8.667-7 14 0 5.333.667 9 2 11s5.333\n 5.333 12 10c90.667 54 156 130 196 228 3.333 10.667 6.333 16.333 9 17 2 .667 5\n 1 9 1h5c10.667 0 16.667-2 18-6 2-2.667 1-9.667-3-21-32-87.333-82.667-157.667\n-152-211l-3-3h399907v-40zM93 281 H400000 v-40L7 241z",leftharpoondownplus:"M7 435c-4 4-6.3 8.7-7 14 0 5.3.7 9 2 11s5.3 5.3 12\n 10c90.7 54 156 130 196 228 3.3 10.7 6.3 16.3 9 17 2 .7 5 1 9 1h5c10.7 0 16.7\n-2 18-6 2-2.7 1-9.7-3-21-32-87.3-82.7-157.7-152-211l-3-3h399907v-40H7zm93 0\nv40h399900v-40zM0 241v40h399900v-40zm0 0v40h399900v-40z",lefthook:"M400000 281 H103s-33-11.2-61-33.5S0 197.3 0 164s14.2-61.2 42.5\n-83.5C70.8 58.2 104 47 142 47 c16.7 0 25 6.7 25 20 0 12-8.7 18.7-26 20-40 3.3\n-68.7 15.7-86 37-10 12-15 25.3-15 40 0 22.7 9.8 40.7 29.5 54 19.7 13.3 43.5 21\n 71.5 23h399859zM103 281v-40h399897v40z",leftlinesegment:"M40 281 V428 H0 V94 H40 V241 H400000 v40z\nM40 281 V428 H0 V94 H40 V241 H400000 v40z",leftmapsto:"M40 281 V448H0V74H40V241H400000v40z\nM40 281 V448H0V74H40V241H400000v40z",leftToFrom:"M0 147h400000v40H0zm0 214c68 40 115.7 95.7 143 167h22c15.3 0 23\n-.3 23-1 0-1.3-5.3-13.7-16-37-18-35.3-41.3-69-70-101l-7-8h399905v-40H95l7-8\nc28.7-32 52-65.7 70-101 10.7-23.3 16-35.7 16-37 0-.7-7.7-1-23-1h-22C115.7 265.3\n 68 321 0 361zm0-174v-40h399900v40zm100 154v40h399900v-40z",longequal:"M0 50 h400000 v40H0z m0 194h40000v40H0z\nM0 50 h400000 v40H0z m0 194h40000v40H0z",midbrace:"M200428 334\nc-100.7-8.3-195.3-44-280-108-55.3-42-101.7-93-139-153l-9-14c-2.7 4-5.7 8.7-9 14\n-53.3 86.7-123.7 153-211 199-66.7 36-137.3 56.3-212 62H0V214h199568c178.3-11.7\n 311.7-78.3 403-201 6-8 9.7-12 11-12 .7-.7 6.7-1 18-1s17.3.3 18 1c1.3 0 5 4 11\n 12 44.7 59.3 101.3 106.3 170 141s145.3 54.3 229 60h199572v120z",midbraceunder:"M199572 214\nc100.7 8.3 195.3 44 280 108 55.3 42 101.7 93 139 153l9 14c2.7-4 5.7-8.7 9-14\n 53.3-86.7 123.7-153 211-199 66.7-36 137.3-56.3 212-62h199568v120H200432c-178.3\n 11.7-311.7 78.3-403 201-6 8-9.7 12-11 12-.7.7-6.7 1-18 1s-17.3-.3-18-1c-1.3 0\n-5-4-11-12-44.7-59.3-101.3-106.3-170-141s-145.3-54.3-229-60H0V214z",oiintSize1:"M512.6 71.6c272.6 0 320.3 106.8 320.3 178.2 0 70.8-47.7 177.6\n-320.3 177.6S193.1 320.6 193.1 249.8c0-71.4 46.9-178.2 319.5-178.2z\nm368.1 178.2c0-86.4-60.9-215.4-368.1-215.4-306.4 0-367.3 129-367.3 215.4 0 85.8\n60.9 214.8 367.3 214.8 307.2 0 368.1-129 368.1-214.8z",oiintSize2:"M757.8 100.1c384.7 0 451.1 137.6 451.1 230 0 91.3-66.4 228.8\n-451.1 228.8-386.3 0-452.7-137.5-452.7-228.8 0-92.4 66.4-230 452.7-230z\nm502.4 230c0-111.2-82.4-277.2-502.4-277.2s-504 166-504 277.2\nc0 110 84 276 504 276s502.4-166 502.4-276z",oiiintSize1:"M681.4 71.6c408.9 0 480.5 106.8 480.5 178.2 0 70.8-71.6 177.6\n-480.5 177.6S202.1 320.6 202.1 249.8c0-71.4 70.5-178.2 
479.3-178.2z\nm525.8 178.2c0-86.4-86.8-215.4-525.7-215.4-437.9 0-524.7 129-524.7 215.4 0\n85.8 86.8 214.8 524.7 214.8 438.9 0 525.7-129 525.7-214.8z",oiiintSize2:"M1021.2 53c603.6 0 707.8 165.8 707.8 277.2 0 110-104.2 275.8\n-707.8 275.8-606 0-710.2-165.8-710.2-275.8C311 218.8 415.2 53 1021.2 53z\nm770.4 277.1c0-131.2-126.4-327.6-770.5-327.6S248.4 198.9 248.4 330.1\nc0 130 128.8 326.4 772.7 326.4s770.5-196.4 770.5-326.4z",rightarrow:"M0 241v40h399891c-47.3 35.3-84 78-110 128\n-16.7 32-27.7 63.7-33 95 0 1.3-.2 2.7-.5 4-.3 1.3-.5 2.3-.5 3 0 7.3 6.7 11 20\n 11 8 0 13.2-.8 15.5-2.5 2.3-1.7 4.2-5.5 5.5-11.5 2-13.3 5.7-27 11-41 14.7-44.7\n 39-84.5 73-119.5s73.7-60.2 119-75.5c6-2 9-5.7 9-11s-3-9-9-11c-45.3-15.3-85\n-40.5-119-75.5s-58.3-74.8-73-119.5c-4.7-14-8.3-27.3-11-40-1.3-6.7-3.2-10.8-5.5\n-12.5-2.3-1.7-7.5-2.5-15.5-2.5-14 0-21 3.7-21 11 0 2 2 10.3 6 25 20.7 83.3 67\n 151.7 139 205zm0 0v40h399900v-40z",rightbrace:"M400000 542l\n-6 6h-17c-12.7 0-19.3-.3-20-1-4-4-7.3-8.3-10-13-35.3-51.3-80.8-93.8-136.5-127.5\ns-117.2-55.8-184.5-66.5c-.7 0-2-.3-4-1-18.7-2.7-76-4.3-172-5H0V214h399571l6 1\nc124.7 8 235 61.7 331 161 31.3 33.3 59.7 72.7 85 118l7 13v35z",rightbraceunder:"M399994 0l6 6v35l-6 11c-56 104-135.3 181.3-238 232-57.3\n 28.7-117 45-179 50H-300V214h399897c43.3-7 81-15 113-26 100.7-33 179.7-91 237\n-174 2.7-5 6-9 10-13 .7-1 7.3-1 20-1h17z",rightgroup:"M0 80h399565c371 0 266.7 149.4 414 180 5.9 1.2 18 0 18 0 2 0\n 3-1 3-3v-38c-76-158-257-219-435-219H0z",rightgroupunder:"M0 262h399565c371 0 266.7-149.4 414-180 5.9-1.2 18 0 18\n 0 2 0 3 1 3 3v38c-76 158-257 219-435 219H0z",rightharpoon:"M0 241v40h399993c4.7-4.7 7-9.3 7-14 0-9.3\n-3.7-15.3-11-18-92.7-56.7-159-133.7-199-231-3.3-9.3-6-14.7-8-16-2-1.3-7-2-15-2\n-10.7 0-16.7 2-18 6-2 2.7-1 9.7 3 21 15.3 42 36.7 81.8 64 119.5 27.3 37.7 58\n 69.2 92 94.5zm0 0v40h399900v-40z",rightharpoonplus:"M0 241v40h399993c4.7-4.7 7-9.3 7-14 0-9.3-3.7-15.3-11\n-18-92.7-56.7-159-133.7-199-231-3.3-9.3-6-14.7-8-16-2-1.3-7-2-15-2-10.7 0-16.7\n 2-18 6-2 2.7-1 9.7 3 21 15.3 42 36.7 81.8 64 119.5 27.3 37.7 58 69.2 92 94.5z\nm0 0v40h399900v-40z m100 194v40h399900v-40zm0 0v40h399900v-40z",rightharpoondown:"M399747 511c0 7.3 6.7 11 20 11 8 0 13-.8 15-2.5s4.7-6.8\n 8-15.5c40-94 99.3-166.3 178-217 13.3-8 20.3-12.3 21-13 5.3-3.3 8.5-5.8 9.5\n-7.5 1-1.7 1.5-5.2 1.5-10.5s-2.3-10.3-7-15H0v40h399908c-34 25.3-64.7 57-92 95\n-27.3 38-48.7 77.7-64 119-3.3 8.7-5 14-5 16zM0 241v40h399900v-40z",rightharpoondownplus:"M399747 705c0 7.3 6.7 11 20 11 8 0 13-.8\n 15-2.5s4.7-6.8 8-15.5c40-94 99.3-166.3 178-217 13.3-8 20.3-12.3 21-13 5.3-3.3\n 8.5-5.8 9.5-7.5 1-1.7 1.5-5.2 1.5-10.5s-2.3-10.3-7-15H0v40h399908c-34 25.3\n-64.7 57-92 95-27.3 38-48.7 77.7-64 119-3.3 8.7-5 14-5 16zM0 435v40h399900v-40z\nm0-194v40h400000v-40zm0 0v40h400000v-40z",righthook:"M399859 241c-764 0 0 0 0 0 40-3.3 68.7-15.7 86-37 10-12 15-25.3\n 15-40 0-22.7-9.8-40.7-29.5-54-19.7-13.3-43.5-21-71.5-23-17.3-1.3-26-8-26-20 0\n-13.3 8.7-20 26-20 38 0 71 11.2 99 33.5 0 0 7 5.6 21 16.7 14 11.2 21 33.5 21\n 66.8s-14 61.2-42 83.5c-28 22.3-61 33.5-99 33.5L0 241z M0 281v-40h399859v40z",rightlinesegment:"M399960 241 V94 h40 V428 h-40 V281 H0 v-40z\nM399960 241 V94 h40 V428 h-40 V281 H0 v-40z",rightToFrom:"M400000 167c-70.7-42-118-97.7-142-167h-23c-15.3 0-23 .3-23\n 1 0 1.3 5.3 13.7 16 37 18 35.3 41.3 69 70 101l7 8H0v40h399905l-7 8c-28.7 32\n-52 65.7-70 101-10.7 23.3-16 35.7-16 37 0 .7 7.7 1 23 1h23c24-69.3 71.3-125 142\n-167z M100 147v40h399900v-40zM0 341v40h399900v-40z",twoheadleftarrow:"M0 167c68 40\n 115.7 95.7 143 167h22c15.3 0 23-.3 
23-1 0-1.3-5.3-13.7-16-37-18-35.3-41.3-69\n-70-101l-7-8h125l9 7c50.7 39.3 85 86 103 140h46c0-4.7-6.3-18.7-19-42-18-35.3\n-40-67.3-66-96l-9-9h399716v-40H284l9-9c26-28.7 48-60.7 66-96 12.7-23.333 19\n-37.333 19-42h-46c-18 54-52.3 100.7-103 140l-9 7H95l7-8c28.7-32 52-65.7 70-101\n 10.7-23.333 16-35.7 16-37 0-.7-7.7-1-23-1h-22C115.7 71.3 68 127 0 167z",twoheadrightarrow:"M400000 167\nc-68-40-115.7-95.7-143-167h-22c-15.3 0-23 .3-23 1 0 1.3 5.3 13.7 16 37 18 35.3\n 41.3 69 70 101l7 8h-125l-9-7c-50.7-39.3-85-86-103-140h-46c0 4.7 6.3 18.7 19 42\n 18 35.3 40 67.3 66 96l9 9H0v40h399716l-9 9c-26 28.7-48 60.7-66 96-12.7 23.333\n-19 37.333-19 42h46c18-54 52.3-100.7 103-140l9-7h125l-7 8c-28.7 32-52 65.7-70\n 101-10.7 23.333-16 35.7-16 37 0 .7 7.7 1 23 1h22c27.3-71.3 75-127 143-167z",tilde1:"M200 55.538c-77 0-168 73.953-177 73.953-3 0-7\n-2.175-9-5.437L2 97c-1-2-2-4-2-6 0-4 2-7 5-9l20-12C116 12 171 0 207 0c86 0\n 114 68 191 68 78 0 168-68 177-68 4 0 7 2 9 5l12 19c1 2.175 2 4.35 2 6.525 0\n 4.35-2 7.613-5 9.788l-19 13.05c-92 63.077-116.937 75.308-183 76.128\n-68.267.847-113-73.952-191-73.952z",tilde2:"M344 55.266c-142 0-300.638 81.316-311.5 86.418\n-8.01 3.762-22.5 10.91-23.5 5.562L1 120c-1-2-1-3-1-4 0-5 3-9 8-10l18.4-9C160.9\n 31.9 283 0 358 0c148 0 188 122 331 122s314-97 326-97c4 0 8 2 10 7l7 21.114\nc1 2.14 1 3.21 1 4.28 0 5.347-3 9.626-7 10.696l-22.3 12.622C852.6 158.372 751\n 181.476 676 181.476c-149 0-189-126.21-332-126.21z",tilde3:"M786 59C457 59 32 175.242 13 175.242c-6 0-10-3.457\n-11-10.37L.15 138c-1-7 3-12 10-13l19.2-6.4C378.4 40.7 634.3 0 804.3 0c337 0\n 411.8 157 746.8 157 328 0 754-112 773-112 5 0 10 3 11 9l1 14.075c1 8.066-.697\n 16.595-6.697 17.492l-21.052 7.31c-367.9 98.146-609.15 122.696-778.15 122.696\n -338 0-409-156.573-744-156.573z",tilde4:"M786 58C457 58 32 177.487 13 177.487c-6 0-10-3.345\n-11-10.035L.15 143c-1-7 3-12 10-13l22-6.7C381.2 35 637.15 0 807.15 0c337 0 409\n 177 744 177 328 0 754-127 773-127 5 0 10 3 11 9l1 14.794c1 7.805-3 13.38-9\n 14.495l-20.7 5.574c-366.85 99.79-607.3 139.372-776.3 139.372-338 0-409\n -175.236-744-175.236z",vec:"M377 20c0-5.333 1.833-10 5.5-14S391 0 397 0c4.667 0 8.667 1.667 12 5\n3.333 2.667 6.667 9 10 19 6.667 24.667 20.333 43.667 41 57 7.333 4.667 11\n10.667 11 18 0 6-1 10-3 12s-6.667 5-14 9c-28.667 14.667-53.667 35.667-75 63\n-1.333 1.333-3.167 3.5-5.5 6.5s-4 4.833-5 5.5c-1 .667-2.5 1.333-4.5 2s-4.333 1\n-7 1c-4.667 0-9.167-1.833-13.5-5.5S337 184 337 178c0-12.667 15.667-32.333 47-59\nH213l-171-1c-8.667-6-13-12.333-13-19 0-4.667 4.333-11.333 13-20h359\nc-16-25.333-24-45-24-59z",widehat1:"M529 0h5l519 115c5 1 9 5 9 10 0 1-1 2-1 3l-4 22\nc-1 5-5 9-11 9h-2L532 67 19 159h-2c-5 0-9-4-11-9l-5-22c-1-6 2-12 8-13z",widehat2:"M1181 0h2l1171 176c6 0 10 5 10 11l-2 23c-1 6-5 10\n-11 10h-1L1182 67 15 220h-1c-6 0-10-4-11-10l-2-23c-1-6 4-11 10-11z",widehat3:"M1181 0h2l1171 236c6 0 10 5 10 11l-2 23c-1 6-5 10\n-11 10h-1L1182 67 15 280h-1c-6 0-10-4-11-10l-2-23c-1-6 4-11 10-11z",widehat4:"M1181 0h2l1171 296c6 0 10 5 10 11l-2 23c-1 6-5 10\n-11 10h-1L1182 67 15 340h-1c-6 0-10-4-11-10l-2-23c-1-6 4-11 
10-11z",widecheck1:"M529,159h5l519,-115c5,-1,9,-5,9,-10c0,-1,-1,-2,-1,-3l-4,-22c-1,\n-5,-5,-9,-11,-9h-2l-512,92l-513,-92h-2c-5,0,-9,4,-11,9l-5,22c-1,6,2,12,8,13z",widecheck2:"M1181,220h2l1171,-176c6,0,10,-5,10,-11l-2,-23c-1,-6,-5,-10,\n-11,-10h-1l-1168,153l-1167,-153h-1c-6,0,-10,4,-11,10l-2,23c-1,6,4,11,10,11z",widecheck3:"M1181,280h2l1171,-236c6,0,10,-5,10,-11l-2,-23c-1,-6,-5,-10,\n-11,-10h-1l-1168,213l-1167,-213h-1c-6,0,-10,4,-11,10l-2,23c-1,6,4,11,10,11z",widecheck4:"M1181,340h2l1171,-296c6,0,10,-5,10,-11l-2,-23c-1,-6,-5,-10,\n-11,-10h-1l-1168,273l-1167,-273h-1c-6,0,-10,4,-11,10l-2,23c-1,6,4,11,10,11z",baraboveleftarrow:"M400000 620h-399890l3 -3c68.7 -52.7 113.7 -120 135 -202\nc4 -14.7 6 -23 6 -25c0 -7.3 -7 -11 -21 -11c-8 0 -13.2 0.8 -15.5 2.5\nc-2.3 1.7 -4.2 5.8 -5.5 12.5c-1.3 4.7 -2.7 10.3 -4 17c-12 48.7 -34.8 92 -68.5 130\ns-74.2 66.3 -121.5 85c-10 4 -16 7.7 -18 11c0 8.7 6 14.3 18 17c47.3 18.7 87.8 47\n121.5 85s56.5 81.3 68.5 130c0.7 2 1.3 5 2 9s1.2 6.7 1.5 8c0.3 1.3 1 3.3 2 6\ns2.2 4.5 3.5 5.5c1.3 1 3.3 1.8 6 2.5s6 1 10 1c14 0 21 -3.7 21 -11\nc0 -2 -2 -10.3 -6 -25c-20 -79.3 -65 -146.7 -135 -202l-3 -3h399890z\nM100 620v40h399900v-40z M0 241v40h399900v-40zM0 241v40h399900v-40z",rightarrowabovebar:"M0 241v40h399891c-47.3 35.3-84 78-110 128-16.7 32\n-27.7 63.7-33 95 0 1.3-.2 2.7-.5 4-.3 1.3-.5 2.3-.5 3 0 7.3 6.7 11 20 11 8 0\n13.2-.8 15.5-2.5 2.3-1.7 4.2-5.5 5.5-11.5 2-13.3 5.7-27 11-41 14.7-44.7 39\n-84.5 73-119.5s73.7-60.2 119-75.5c6-2 9-5.7 9-11s-3-9-9-11c-45.3-15.3-85-40.5\n-119-75.5s-58.3-74.8-73-119.5c-4.7-14-8.3-27.3-11-40-1.3-6.7-3.2-10.8-5.5\n-12.5-2.3-1.7-7.5-2.5-15.5-2.5-14 0-21 3.7-21 11 0 2 2 10.3 6 25 20.7 83.3 67\n151.7 139 205zm96 379h399894v40H0zm0 0h399904v40H0z",baraboveshortleftharpoon:"M507,435c-4,4,-6.3,8.7,-7,14c0,5.3,0.7,9,2,11\nc1.3,2,5.3,5.3,12,10c90.7,54,156,130,196,228c3.3,10.7,6.3,16.3,9,17\nc2,0.7,5,1,9,1c0,0,5,0,5,0c10.7,0,16.7,-2,18,-6c2,-2.7,1,-9.7,-3,-21\nc-32,-87.3,-82.7,-157.7,-152,-211c0,0,-3,-3,-3,-3l399351,0l0,-40\nc-398570,0,-399437,0,-399437,0z M593 435 v40 H399500 v-40z\nM0 281 v-40 H399908 v40z M0 281 v-40 H399908 v40z",rightharpoonaboveshortbar:"M0,241 l0,40c399126,0,399993,0,399993,0\nc4.7,-4.7,7,-9.3,7,-14c0,-9.3,-3.7,-15.3,-11,-18c-92.7,-56.7,-159,-133.7,-199,\n-231c-3.3,-9.3,-6,-14.7,-8,-16c-2,-1.3,-7,-2,-15,-2c-10.7,0,-16.7,2,-18,6\nc-2,2.7,-1,9.7,3,21c15.3,42,36.7,81.8,64,119.5c27.3,37.7,58,69.2,92,94.5z\nM0 241 v40 H399908 v-40z M0 475 v-40 H399500 v40z M0 475 v-40 H399500 v40z",shortbaraboveleftharpoon:"M7,435c-4,4,-6.3,8.7,-7,14c0,5.3,0.7,9,2,11\nc1.3,2,5.3,5.3,12,10c90.7,54,156,130,196,228c3.3,10.7,6.3,16.3,9,17c2,0.7,5,1,9,\n1c0,0,5,0,5,0c10.7,0,16.7,-2,18,-6c2,-2.7,1,-9.7,-3,-21c-32,-87.3,-82.7,-157.7,\n-152,-211c0,0,-3,-3,-3,-3l399907,0l0,-40c-399126,0,-399993,0,-399993,0z\nM93 435 v40 H400000 v-40z M500 241 v40 H400000 v-40z M500 241 v40 H400000 v-40z",shortrightharpoonabovebar:"M53,241l0,40c398570,0,399437,0,399437,0\nc4.7,-4.7,7,-9.3,7,-14c0,-9.3,-3.7,-15.3,-11,-18c-92.7,-56.7,-159,-133.7,-199,\n-231c-3.3,-9.3,-6,-14.7,-8,-16c-2,-1.3,-7,-2,-15,-2c-10.7,0,-16.7,2,-18,6\nc-2,2.7,-1,9.7,3,21c15.3,42,36.7,81.8,64,119.5c27.3,37.7,58,69.2,92,94.5z\nM500 241 v40 H399408 v-40z M500 435 v40 H400000 v-40z"},A=function(){function e(e){this.children=void 0,this.classes=void 0,this.height=void 0,this.depth=void 0,this.maxFontSize=void 0,this.style=void 0,this.children=e,this.classes=[],this.height=0,this.depth=0,this.maxFontSize=0,this.style={}}var t=e.prototype;return t.hasClass=function(e){return 
l.contains(this.classes,e)},t.toNode=function(){for(var e=document.createDocumentFragment(),t=0;t=5?0:e>=3?1:2]){var r=N[t]={cssEmPerMu:B.quad[t]/18};for(var n in B)B.hasOwnProperty(n)&&(r[n]=B[n][t])}return N[t]}(this.size)),this._fontMetrics},t.getColor=function(){return this.phantom?"transparent":this.color},e}();H.BASESIZE=6;var E=H,L={pt:1,mm:7227/2540,cm:7227/254,in:72.27,bp:1.00375,pc:12,dd:1238/1157,cc:14856/1157,nd:685/642,nc:1370/107,sp:1/65536,px:1.00375},D={ex:!0,em:!0,mu:!0},P=function(e){return"string"!=typeof e&&(e=e.unit),e in L||e in D||"ex"===e},F=function(e,t){var r;if(e.unit in L)r=L[e.unit]/t.fontMetrics().ptPerEm/t.sizeMultiplier;else if("mu"===e.unit)r=t.fontMetrics().cssEmPerMu;else{var a;if(a=t.style.isTight()?t.havingStyle(t.style.text()):t,"ex"===e.unit)r=a.fontMetrics().xHeight;else{if("em"!==e.unit)throw new n("Invalid unit: '"+e.unit+"'");r=a.fontMetrics().quad}a!==t&&(r*=a.sizeMultiplier/t.sizeMultiplier)}return Math.min(e.number*r,t.maxSize)},V=function(e){return+e.toFixed(4)+"em"},G=function(e){return e.filter((function(e){return e})).join(" ")},U=function(e,t,r){if(this.classes=e||[],this.attributes={},this.height=0,this.depth=0,this.maxFontSize=0,this.style=r||{},t){t.style.isTight()&&this.classes.push("mtight");var n=t.getColor();n&&(this.style.color=n)}},Y=function(e){var t=document.createElement(e);for(var r in t.className=G(this.classes),this.style)this.style.hasOwnProperty(r)&&(t.style[r]=this.style[r]);for(var n in this.attributes)this.attributes.hasOwnProperty(n)&&t.setAttribute(n,this.attributes[n]);for(var a=0;a"},W=function(){function e(e,t,r,n){this.children=void 0,this.attributes=void 0,this.classes=void 0,this.height=void 0,this.depth=void 0,this.width=void 0,this.maxFontSize=void 0,this.style=void 0,U.call(this,e,r,n),this.children=t||[]}var t=e.prototype;return t.setAttribute=function(e,t){this.attributes[e]=t},t.hasClass=function(e){return l.contains(this.classes,e)},t.toNode=function(){return Y.call(this,"span")},t.toMarkup=function(){return X.call(this,"span")},e}(),_=function(){function e(e,t,r,n){this.children=void 0,this.attributes=void 0,this.classes=void 0,this.height=void 0,this.depth=void 0,this.maxFontSize=void 0,this.style=void 0,U.call(this,t,n),this.children=r||[],this.setAttribute("href",e)}var t=e.prototype;return t.setAttribute=function(e,t){this.attributes[e]=t},t.hasClass=function(e){return l.contains(this.classes,e)},t.toNode=function(){return Y.call(this,"a")},t.toMarkup=function(){return X.call(this,"a")},e}(),j=function(){function e(e,t,r){this.src=void 0,this.alt=void 0,this.classes=void 0,this.height=void 0,this.depth=void 0,this.maxFontSize=void 0,this.style=void 0,this.alt=t,this.src=e,this.classes=["mord"],this.style=r}var t=e.prototype;return t.hasClass=function(e){return l.contains(this.classes,e)},t.toNode=function(){var e=document.createElement("img");for(var t in e.src=this.src,e.alt=this.alt,e.className="mord",this.style)this.style.hasOwnProperty(t)&&(e.style[t]=this.style[t]);return e},t.toMarkup=function(){var e=""+this.alt+"=a[0]&&e<=a[1])return r.name}return null}(this.text.charCodeAt(0));l&&this.classes.push(l+"_fallback"),/[\xee\xef\xed\xec]/.test(this.text)&&(this.text=$[this.text])}var t=e.prototype;return t.hasClass=function(e){return l.contains(this.classes,e)},t.toNode=function(){var e=document.createTextNode(this.text),t=null;for(var r in 
this.italic>0&&((t=document.createElement("span")).style.marginRight=V(this.italic)),this.classes.length>0&&((t=t||document.createElement("span")).className=G(this.classes)),this.style)this.style.hasOwnProperty(r)&&((t=t||document.createElement("span")).style[r]=this.style[r]);return t?(t.appendChild(e),t):e},t.toMarkup=function(){var e=!1,t="0&&(r+="margin-right:"+this.italic+"em;"),this.style)this.style.hasOwnProperty(n)&&(r+=l.hyphenate(n)+":"+this.style[n]+";");r&&(e=!0,t+=' style="'+l.escape(r)+'"');var a=l.escape(this.text);return e?(t+=">",t+=a,t+=""):a},e}(),K=function(){function e(e,t){this.children=void 0,this.attributes=void 0,this.children=e||[],this.attributes=t||{}}var t=e.prototype;return t.toNode=function(){var e=document.createElementNS("http://www.w3.org/2000/svg","svg");for(var t in this.attributes)Object.prototype.hasOwnProperty.call(this.attributes,t)&&e.setAttribute(t,this.attributes[t]);for(var r=0;r":""},e}(),Q=function(){function e(e){this.attributes=void 0,this.attributes=e||{}}var t=e.prototype;return t.toNode=function(){var e=document.createElementNS("http://www.w3.org/2000/svg","line");for(var t in this.attributes)Object.prototype.hasOwnProperty.call(this.attributes,t)&&e.setAttribute(t,this.attributes[t]);return e},t.toMarkup=function(){var e="","\\gt",!0),ie(oe,le,be,"\u2208","\\in",!0),ie(oe,le,be,"\ue020","\\@not"),ie(oe,le,be,"\u2282","\\subset",!0),ie(oe,le,be,"\u2283","\\supset",!0),ie(oe,le,be,"\u2286","\\subseteq",!0),ie(oe,le,be,"\u2287","\\supseteq",!0),ie(oe,he,be,"\u2288","\\nsubseteq",!0),ie(oe,he,be,"\u2289","\\nsupseteq",!0),ie(oe,le,be,"\u22a8","\\models"),ie(oe,le,be,"\u2190","\\leftarrow",!0),ie(oe,le,be,"\u2264","\\le"),ie(oe,le,be,"\u2264","\\leq",!0),ie(oe,le,be,"<","\\lt",!0),ie(oe,le,be,"\u2192","\\rightarrow",!0),ie(oe,le,be,"\u2192","\\to"),ie(oe,he,be,"\u2271","\\ngeq",!0),ie(oe,he,be,"\u2270","\\nleq",!0),ie(oe,le,ye,"\xa0","\\ "),ie(oe,le,ye,"\xa0","\\space"),ie(oe,le,ye,"\xa0","\\nobreakspace"),ie(se,le,ye,"\xa0","\\ "),ie(se,le,ye,"\xa0"," 
"),ie(se,le,ye,"\xa0","\\space"),ie(se,le,ye,"\xa0","\\nobreakspace"),ie(oe,le,ye,null,"\\nobreak"),ie(oe,le,ye,null,"\\allowbreak"),ie(oe,le,ve,",",","),ie(oe,le,ve,";",";"),ie(oe,he,ce,"\u22bc","\\barwedge",!0),ie(oe,he,ce,"\u22bb","\\veebar",!0),ie(oe,le,ce,"\u2299","\\odot",!0),ie(oe,le,ce,"\u2295","\\oplus",!0),ie(oe,le,ce,"\u2297","\\otimes",!0),ie(oe,le,xe,"\u2202","\\partial",!0),ie(oe,le,ce,"\u2298","\\oslash",!0),ie(oe,he,ce,"\u229a","\\circledcirc",!0),ie(oe,he,ce,"\u22a1","\\boxdot",!0),ie(oe,le,ce,"\u25b3","\\bigtriangleup"),ie(oe,le,ce,"\u25bd","\\bigtriangledown"),ie(oe,le,ce,"\u2020","\\dagger"),ie(oe,le,ce,"\u22c4","\\diamond"),ie(oe,le,ce,"\u22c6","\\star"),ie(oe,le,ce,"\u25c3","\\triangleleft"),ie(oe,le,ce,"\u25b9","\\triangleright"),ie(oe,le,ge,"{","\\{"),ie(se,le,xe,"{","\\{"),ie(se,le,xe,"{","\\textbraceleft"),ie(oe,le,ue,"}","\\}"),ie(se,le,xe,"}","\\}"),ie(se,le,xe,"}","\\textbraceright"),ie(oe,le,ge,"{","\\lbrace"),ie(oe,le,ue,"}","\\rbrace"),ie(oe,le,ge,"[","\\lbrack",!0),ie(se,le,xe,"[","\\lbrack",!0),ie(oe,le,ue,"]","\\rbrack",!0),ie(se,le,xe,"]","\\rbrack",!0),ie(oe,le,ge,"(","\\lparen",!0),ie(oe,le,ue,")","\\rparen",!0),ie(se,le,xe,"<","\\textless",!0),ie(se,le,xe,">","\\textgreater",!0),ie(oe,le,ge,"\u230a","\\lfloor",!0),ie(oe,le,ue,"\u230b","\\rfloor",!0),ie(oe,le,ge,"\u2308","\\lceil",!0),ie(oe,le,ue,"\u2309","\\rceil",!0),ie(oe,le,xe,"\\","\\backslash"),ie(oe,le,xe,"\u2223","|"),ie(oe,le,xe,"\u2223","\\vert"),ie(se,le,xe,"|","\\textbar",!0),ie(oe,le,xe,"\u2225","\\|"),ie(oe,le,xe,"\u2225","\\Vert"),ie(se,le,xe,"\u2225","\\textbardbl"),ie(se,le,xe,"~","\\textasciitilde"),ie(se,le,xe,"\\","\\textbackslash"),ie(se,le,xe,"^","\\textasciicircum"),ie(oe,le,be,"\u2191","\\uparrow",!0),ie(oe,le,be,"\u21d1","\\Uparrow",!0),ie(oe,le,be,"\u2193","\\downarrow",!0),ie(oe,le,be,"\u21d3","\\Downarrow",!0),ie(oe,le,be,"\u2195","\\updownarrow",!0),ie(oe,le,be,"\u21d5","\\Updownarrow",!0),ie(oe,le,fe,"\u2210","\\coprod"),ie(oe,le,fe,"\u22c1","\\bigvee"),ie(oe,le,fe,"\u22c0","\\bigwedge"),ie(oe,le,fe,"\u2a04","\\biguplus"),ie(oe,le,fe,"\u22c2","\\bigcap"),ie(oe,le,fe,"\u22c3","\\bigcup"),ie(oe,le,fe,"\u222b","\\int"),ie(oe,le,fe,"\u222b","\\intop"),ie(oe,le,fe,"\u222c","\\iint"),ie(oe,le,fe,"\u222d","\\iiint"),ie(oe,le,fe,"\u220f","\\prod"),ie(oe,le,fe,"\u2211","\\sum"),ie(oe,le,fe,"\u2a02","\\bigotimes"),ie(oe,le,fe,"\u2a01","\\bigoplus"),ie(oe,le,fe,"\u2a00","\\bigodot"),ie(oe,le,fe,"\u222e","\\oint"),ie(oe,le,fe,"\u222f","\\oiint"),ie(oe,le,fe,"\u2230","\\oiiint"),ie(oe,le,fe,"\u2a06","\\bigsqcup"),ie(oe,le,fe,"\u222b","\\smallint"),ie(se,le,pe,"\u2026","\\textellipsis"),ie(oe,le,pe,"\u2026","\\mathellipsis"),ie(se,le,pe,"\u2026","\\ldots",!0),ie(oe,le,pe,"\u2026","\\ldots",!0),ie(oe,le,pe,"\u22ef","\\@cdots",!0),ie(oe,le,pe,"\u22f1","\\ddots",!0),ie(oe,le,xe,"\u22ee","\\varvdots"),ie(oe,le,me,"\u02ca","\\acute"),ie(oe,le,me,"\u02cb","\\grave"),ie(oe,le,me,"\xa8","\\ddot"),ie(oe,le,me,"~","\\tilde"),ie(oe,le,me,"\u02c9","\\bar"),ie(oe,le,me,"\u02d8","\\breve"),ie(oe,le,me,"\u02c7","\\check"),ie(oe,le,me,"^","\\hat"),ie(oe,le,me,"\u20d7","\\vec"),ie(oe,le,me,"\u02d9","\\dot"),ie(oe,le,me,"\u02da","\\mathring"),ie(oe,le,de,"\ue131","\\@imath"),ie(oe,le,de,"\ue237","\\@jmath"),ie(oe,le,xe,"\u0131","\u0131"),ie(oe,le,xe,"\u0237","\u0237"),ie(se,le,xe,"\u0131","\\i",!0),ie(se,le,xe,"\u0237","\\j",!0),ie(se,le,xe,"\xdf","\\ss",!0),ie(se,le,xe,"\xe6","\\ae",!0),ie(se,le,xe,"\u0153","\\oe",!0),ie(se,le,xe,"\xf8","\\o",!0),ie(se,le,xe,"\xc6","\\AE",!0),ie(se,le,xe,"\u0152","\\OE",
!0),ie(se,le,xe,"\xd8","\\O",!0),ie(se,le,me,"\u02ca","\\'"),ie(se,le,me,"\u02cb","\\`"),ie(se,le,me,"\u02c6","\\^"),ie(se,le,me,"\u02dc","\\~"),ie(se,le,me,"\u02c9","\\="),ie(se,le,me,"\u02d8","\\u"),ie(se,le,me,"\u02d9","\\."),ie(se,le,me,"\xb8","\\c"),ie(se,le,me,"\u02da","\\r"),ie(se,le,me,"\u02c7","\\v"),ie(se,le,me,"\xa8",'\\"'),ie(se,le,me,"\u02dd","\\H"),ie(se,le,me,"\u25ef","\\textcircled");var we={"--":!0,"---":!0,"``":!0,"''":!0};ie(se,le,xe,"\u2013","--",!0),ie(se,le,xe,"\u2013","\\textendash"),ie(se,le,xe,"\u2014","---",!0),ie(se,le,xe,"\u2014","\\textemdash"),ie(se,le,xe,"\u2018","`",!0),ie(se,le,xe,"\u2018","\\textquoteleft"),ie(se,le,xe,"\u2019","'",!0),ie(se,le,xe,"\u2019","\\textquoteright"),ie(se,le,xe,"\u201c","``",!0),ie(se,le,xe,"\u201c","\\textquotedblleft"),ie(se,le,xe,"\u201d","''",!0),ie(se,le,xe,"\u201d","\\textquotedblright"),ie(oe,le,xe,"\xb0","\\degree",!0),ie(se,le,xe,"\xb0","\\degree"),ie(se,le,xe,"\xb0","\\textdegree",!0),ie(oe,le,xe,"\xa3","\\pounds"),ie(oe,le,xe,"\xa3","\\mathsterling",!0),ie(se,le,xe,"\xa3","\\pounds"),ie(se,le,xe,"\xa3","\\textsterling",!0),ie(oe,he,xe,"\u2720","\\maltese"),ie(se,he,xe,"\u2720","\\maltese");for(var ke='0123456789/@."',Se=0;Set&&(t=i.height),i.depth>r&&(r=i.depth),i.maxFontSize>n&&(n=i.maxFontSize)}e.height=t,e.depth=r,e.maxFontSize=n},Xe=function(e,t,r,n){var a=new W(e,t,r,n);return Ye(a),a},We=function(e,t,r,n){return new W(e,t,r,n)},_e=function(e){var t=new A(e);return Ye(t),t},je=function(e,t,r){var n="";switch(e){case"amsrm":n="AMS";break;case"textrm":n="Main";break;case"textsf":n="SansSerif";break;case"texttt":n="Typewriter";break;default:n=e}return n+"-"+("textbf"===t&&"textit"===r?"BoldItalic":"textbf"===t?"Bold":"textit"===t?"Italic":"Regular")},$e={mathbf:{variant:"bold",fontName:"Main-Bold"},mathrm:{variant:"normal",fontName:"Main-Regular"},textit:{variant:"italic",fontName:"Main-Italic"},mathit:{variant:"italic",fontName:"Main-Italic"},mathnormal:{variant:"italic",fontName:"Math-Italic"},mathbb:{variant:"double-struck",fontName:"AMS-Regular"},mathcal:{variant:"script",fontName:"Caligraphic-Regular"},mathfrak:{variant:"fraktur",fontName:"Fraktur-Regular"},mathscr:{variant:"script",fontName:"Script-Regular"},mathsf:{variant:"sans-serif",fontName:"SansSerif-Regular"},mathtt:{variant:"monospace",fontName:"Typewriter-Regular"}},Ze={vec:["vec",.471,.714],oiintSize1:["oiintSize1",.957,.499],oiintSize2:["oiintSize2",1.472,.659],oiiintSize1:["oiiintSize1",1.304,.499],oiiintSize2:["oiiintSize2",1.98,.659]},Ke={fontMap:$e,makeSymbol:Ge,mathsym:function(e,t,r,n){return void 0===n&&(n=[]),"boldsymbol"===r.font&&Ve(e,"Main-Bold",t).metrics?Ge(e,"Main-Bold",t,r,n.concat(["mathbf"])):"\\"===e||"main"===ae[t][e].font?Ge(e,"Main-Regular",t,r,n):Ge(e,"AMS-Regular",t,r,n.concat(["amsrm"]))},makeSpan:Xe,makeSvgSpan:We,makeLineSpan:function(e,t,r){var n=Xe([e],[],t);return n.height=Math.max(r||t.fontMetrics().defaultRuleThickness,t.minRuleThickness),n.style.borderBottomWidth=V(n.height),n.maxFontSize=1,n},makeAnchor:function(e,t,r,n){var a=new _(e,t,r,n);return Ye(a),a},makeFragment:_e,wrapFragment:function(e,t){return e instanceof A?Xe([],[e],t):e},makeVList:function(e,t){for(var r=function(e){if("individualShift"===e.positionType){for(var t=e.children,r=[t[0]],n=-t[0].shift-t[0].elem.depth,a=n,i=1;i0&&(o.push(kt(s,t)),s=[]),o.push(a[l]));s.length>0&&o.push(kt(s,t)),r?((i=kt(ft(r,t,!0))).classes=["tag"],o.push(i)):n&&o.push(n);var m=mt(["katex-html"],o);if(m.setAttribute("aria-hidden","true"),i){var 
c=i.children[0];c.style.height=V(m.height+m.depth),m.depth&&(c.style.verticalAlign=V(-m.depth))}return m}function Mt(e){return new A(e)}var zt=function(){function e(e,t,r){this.type=void 0,this.attributes=void 0,this.children=void 0,this.classes=void 0,this.type=e,this.attributes={},this.children=t||[],this.classes=r||[]}var t=e.prototype;return t.setAttribute=function(e,t){this.attributes[e]=t},t.getAttribute=function(e){return this.attributes[e]},t.toNode=function(){var e=document.createElementNS("http://www.w3.org/1998/Math/MathML",this.type);for(var t in this.attributes)Object.prototype.hasOwnProperty.call(this.attributes,t)&&e.setAttribute(t,this.attributes[t]);this.classes.length>0&&(e.className=G(this.classes));for(var r=0;r0&&(e+=' class ="'+l.escape(G(this.classes))+'"'),e+=">";for(var r=0;r"},t.toText=function(){return this.children.map((function(e){return e.toText()})).join("")},e}(),At=function(){function e(e){this.text=void 0,this.text=e}var t=e.prototype;return t.toNode=function(){return document.createTextNode(this.text)},t.toMarkup=function(){return l.escape(this.toText())},t.toText=function(){return this.text},e}(),Tt={MathNode:zt,TextNode:At,SpaceNode:function(){function e(e){this.width=void 0,this.character=void 0,this.width=e,this.character=e>=.05555&&e<=.05556?"\u200a":e>=.1666&&e<=.1667?"\u2009":e>=.2222&&e<=.2223?"\u2005":e>=.2777&&e<=.2778?"\u2005\u200a":e>=-.05556&&e<=-.05555?"\u200a\u2063":e>=-.1667&&e<=-.1666?"\u2009\u2063":e>=-.2223&&e<=-.2222?"\u205f\u2063":e>=-.2778&&e<=-.2777?"\u2005\u2063":null}var t=e.prototype;return t.toNode=function(){if(this.character)return document.createTextNode(this.character);var e=document.createElementNS("http://www.w3.org/1998/Math/MathML","mspace");return e.setAttribute("width",V(this.width)),e},t.toMarkup=function(){return this.character?""+this.character+"":''},t.toText=function(){return this.character?this.character:" "},e}(),newDocumentFragment:Mt},Bt=function(e,t,r){return!ae[t][e]||!ae[t][e].replace||55349===e.charCodeAt(0)||we.hasOwnProperty(e)&&r&&(r.fontFamily&&"tt"===r.fontFamily.substr(4,2)||r.font&&"tt"===r.font.substr(4,2))||(e=ae[t][e].replace),new Tt.TextNode(e)},Ct=function(e){return 1===e.length?e[0]:new Tt.MathNode("mrow",e)},qt=function(e,t){if("texttt"===t.fontFamily)return"monospace";if("textsf"===t.fontFamily)return"textit"===t.fontShape&&"textbf"===t.fontWeight?"sans-serif-bold-italic":"textit"===t.fontShape?"sans-serif-italic":"textbf"===t.fontWeight?"bold-sans-serif":"sans-serif";if("textit"===t.fontShape&&"textbf"===t.fontWeight)return"bold-italic";if("textit"===t.fontShape)return"italic";if("textbf"===t.fontWeight)return"bold";var r=t.font;if(!r||"mathnormal"===r)return null;var n=e.mode;if("mathit"===r)return"italic";if("boldsymbol"===r)return"textord"===e.type?"bold":"bold-italic";if("mathbf"===r)return"bold";if("mathbb"===r)return"double-struck";if("mathfrak"===r)return"fraktur";if("mathscr"===r||"mathcal"===r)return"script";if("mathsf"===r)return"sans-serif";if("mathtt"===r)return"monospace";var a=e.text;return l.contains(["\\imath","\\jmath"],a)?null:(ae[n][a]&&ae[n][a].replace&&(a=ae[n][a].replace),q(a,Ke.fontMap[r].fontName,n)?Ke.fontMap[r].variant:null)},Nt=function(e,t,r){if(1===e.length){var n=Rt(e[0],t);return r&&n instanceof zt&&"mo"===n.type&&(n.setAttribute("lspace","0em"),n.setAttribute("rspace","0em")),[n]}for(var a,i=[],o=0;o0&&(p.text=p.text.slice(0,1)+"\u0338"+p.text.slice(1),i.pop())}}}i.push(s),a=s}return i},It=function(e,t,r){return Ct(Nt(e,t,r))},Rt=function(e,t){if(!e)return 
new Tt.MathNode("mrow");if(it[e.type])return it[e.type](e,t);throw new n("Got group of unknown type: '"+e.type+"'")};function Ot(e,t,r,n,a){var i,o=Nt(e,r);i=1===o.length&&o[0]instanceof zt&&l.contains(["mrow","mtable"],o[0].type)?o[0]:new Tt.MathNode("mrow",o);var s=new Tt.MathNode("annotation",[new Tt.TextNode(t)]);s.setAttribute("encoding","application/x-tex");var h=new Tt.MathNode("semantics",[i,s]),m=new Tt.MathNode("math",[h]);m.setAttribute("xmlns","http://www.w3.org/1998/Math/MathML"),n&&m.setAttribute("display","block");var c=a?"katex":"katex-mathml";return Ke.makeSpan([c],[m])}var Ht=function(e){return new E({style:e.displayMode?x.DISPLAY:x.TEXT,maxSize:e.maxSize,minRuleThickness:e.minRuleThickness})},Et=function(e,t){if(t.displayMode){var r=["katex-display"];t.leqno&&r.push("leqno"),t.fleqn&&r.push("fleqn"),e=Ke.makeSpan(r,[e])}return e},Lt=function(e,t,r){var n,a=Ht(r);if("mathml"===r.output)return Ot(e,t,a,r.displayMode,!0);if("html"===r.output){var i=St(e,a);n=Ke.makeSpan(["katex"],[i])}else{var o=Ot(e,t,a,r.displayMode,!1),s=St(e,a);n=Ke.makeSpan(["katex"],[o,s])}return Et(n,r)},Dt={widehat:"^",widecheck:"\u02c7",widetilde:"~",utilde:"~",overleftarrow:"\u2190",underleftarrow:"\u2190",xleftarrow:"\u2190",overrightarrow:"\u2192",underrightarrow:"\u2192",xrightarrow:"\u2192",underbrace:"\u23df",overbrace:"\u23de",overgroup:"\u23e0",undergroup:"\u23e1",overleftrightarrow:"\u2194",underleftrightarrow:"\u2194",xleftrightarrow:"\u2194",Overrightarrow:"\u21d2",xRightarrow:"\u21d2",overleftharpoon:"\u21bc",xleftharpoonup:"\u21bc",overrightharpoon:"\u21c0",xrightharpoonup:"\u21c0",xLeftarrow:"\u21d0",xLeftrightarrow:"\u21d4",xhookleftarrow:"\u21a9",xhookrightarrow:"\u21aa",xmapsto:"\u21a6",xrightharpoondown:"\u21c1",xleftharpoondown:"\u21bd",xrightleftharpoons:"\u21cc",xleftrightharpoons:"\u21cb",xtwoheadleftarrow:"\u219e",xtwoheadrightarrow:"\u21a0",xlongequal:"=",xtofrom:"\u21c4",xrightleftarrows:"\u21c4",xrightequilibrium:"\u21cc",xleftequilibrium:"\u21cb","\\cdrightarrow":"\u2192","\\cdleftarrow":"\u2190","\\cdlongequal":"="},Pt={overrightarrow:[["rightarrow"],.888,522,"xMaxYMin"],overleftarrow:[["leftarrow"],.888,522,"xMinYMin"],underrightarrow:[["rightarrow"],.888,522,"xMaxYMin"],underleftarrow:[["leftarrow"],.888,522,"xMinYMin"],xrightarrow:[["rightarrow"],1.469,522,"xMaxYMin"],"\\cdrightarrow":[["rightarrow"],3,522,"xMaxYMin"],xleftarrow:[["leftarrow"],1.469,522,"xMinYMin"],"\\cdleftarrow":[["leftarrow"],3,522,"xMinYMin"],Overrightarrow:[["doublerightarrow"],.888,560,"xMaxYMin"],xRightarrow:[["doublerightarrow"],1.526,560,"xMaxYMin"],xLeftarrow:[["doubleleftarrow"],1.526,560,"xMinYMin"],overleftharpoon:[["leftharpoon"],.888,522,"xMinYMin"],xleftharpoonup:[["leftharpoon"],.888,522,"xMinYMin"],xleftharpoondown:[["leftharpoondown"],.888,522,"xMinYMin"],overrightharpoon:[["rightharpoon"],.888,522,"xMaxYMin"],xrightharpoonup:[["rightharpoon"],.888,522,"xMaxYMin"],xrightharpoondown:[["rightharpoondown"],.888,522,"xMaxYMin"],xlongequal:[["longequal"],.888,334,"xMinYMin"],"\\cdlongequal":[["longequal"],3,334,"xMinYMin"],xtwoheadleftarrow:[["twoheadleftarrow"],.888,334,"xMinYMin"],xtwoheadrightarrow:[["twoheadrightarrow"],.888,334,"xMaxYMin"],overleftrightarrow:[["leftarrow","rightarrow"],.888,522],overbrace:[["leftbrace","midbrace","rightbrace"],1.6,548],underbrace:[["leftbraceunder","midbraceunder","rightbraceunder"],1.6,548],underleftrightarrow:[["leftarrow","rightarrow"],.888,522],xleftrightarrow:[["leftarrow","rightarrow"],1.75,522],xLeftrightarrow:[["doubleleftarrow","doublerighta
rrow"],1.75,560],xrightleftharpoons:[["leftharpoondownplus","rightharpoonplus"],1.75,716],xleftrightharpoons:[["leftharpoonplus","rightharpoondownplus"],1.75,716],xhookleftarrow:[["leftarrow","righthook"],1.08,522],xhookrightarrow:[["lefthook","rightarrow"],1.08,522],overlinesegment:[["leftlinesegment","rightlinesegment"],.888,522],underlinesegment:[["leftlinesegment","rightlinesegment"],.888,522],overgroup:[["leftgroup","rightgroup"],.888,342],undergroup:[["leftgroupunder","rightgroupunder"],.888,342],xmapsto:[["leftmapsto","rightarrow"],1.5,522],xtofrom:[["leftToFrom","rightToFrom"],1.75,528],xrightleftarrows:[["baraboveleftarrow","rightarrowabovebar"],1.75,901],xrightequilibrium:[["baraboveshortleftharpoon","rightharpoonaboveshortbar"],1.75,716],xleftequilibrium:[["shortbaraboveleftharpoon","shortrightharpoonabovebar"],1.75,716]},Ft=function(e,t,r,n,a){var i,o=e.height+e.depth+r+n;if(/fbox|color|angl/.test(t)){if(i=Ke.makeSpan(["stretchy",t],[],a),"fbox"===t){var s=a.color&&a.getColor();s&&(i.style.borderColor=s)}}else{var l=[];/^[bx]cancel$/.test(t)&&l.push(new Q({x1:"0",y1:"0",x2:"100%",y2:"100%","stroke-width":"0.046em"})),/^x?cancel$/.test(t)&&l.push(new Q({x1:"0",y1:"100%",x2:"100%",y2:"0","stroke-width":"0.046em"}));var h=new K(l,{width:"100%",height:V(o)});i=Ke.makeSvgSpan([],[h],a)}return i.height=o,i.style.height=V(o),i},Vt=function(e){var t=new Tt.MathNode("mo",[new Tt.TextNode(Dt[e.replace(/^\\/,"")])]);return t.setAttribute("stretchy","true"),t},Gt=function(e,t){var r=function(){var r=4e5,n=e.label.substr(1);if(l.contains(["widehat","widecheck","widetilde","utilde"],n)){var a,i,o,s="ordgroup"===(d=e.base).type?d.body.length:1;if(s>5)"widehat"===n||"widecheck"===n?(a=420,r=2364,o=.42,i=n+"4"):(a=312,r=2340,o=.34,i="tilde4");else{var h=[1,1,2,2,3,3][s];"widehat"===n||"widecheck"===n?(r=[0,1062,2364,2364,2364][h],a=[0,239,300,360,420][h],o=[0,.24,.3,.3,.36,.42][h],i=n+h):(r=[0,600,1033,2339,2340][h],a=[0,260,286,306,312][h],o=[0,.26,.286,.3,.306,.34][h],i="tilde"+h)}var m=new J(i),c=new K([m],{width:"100%",height:V(o),viewBox:"0 0 "+r+" "+a,preserveAspectRatio:"none"});return{span:Ke.makeSvgSpan([],[c],t),minWidth:0,height:o}}var u,p,d,f=[],g=Pt[n],v=g[0],b=g[1],y=g[2],x=y/1e3,w=v.length;if(1===w)u=["hide-tail"],p=[g[3]];else if(2===w)u=["halfarrow-left","halfarrow-right"],p=["xMinYMin","xMaxYMin"];else{if(3!==w)throw new Error("Correct katexImagesData or update code here to support\n "+w+" children.");u=["brace-left","brace-center","brace-right"],p=["xMinYMin","xMidYMin","xMaxYMin"]}for(var k=0;k0&&(n.style.minWidth=V(a)),n};function Ut(e,t){if(!e||e.type!==t)throw new Error("Expected node of type "+t+", but got "+(e?"node of type "+e.type:String(e)));return e}function Yt(e){var t=Xt(e);if(!t)throw new Error("Expected node of symbol group type, but got "+(e?"node of type "+e.type:String(e)));return t}function Xt(e){return e&&("atom"===e.type||re.hasOwnProperty(e.type))?e:null}var Wt=function(e,t){var r,n,a;e&&"supsub"===e.type?(r=(n=Ut(e.base,"accent")).base,e.base=r,a=function(e){if(e instanceof W)return e;throw new Error("Expected span but got "+String(e)+".")}(wt(e,t)),e.base=n):r=(n=Ut(e,"accent")).base;var i=wt(r,t.havingCrampedStyle()),o=0;if(n.isShifty&&l.isCharacterBox(r)){var s=l.getBaseElem(r);o=ee(wt(s,t.havingCrampedStyle())).skew}var 
h,m="\\c"===n.label,c=m?i.height+i.depth:Math.min(i.height,t.fontMetrics().xHeight);if(n.isStretchy)h=Gt(n,t),h=Ke.makeVList({positionType:"firstBaseline",children:[{type:"elem",elem:i},{type:"elem",elem:h,wrapperClasses:["svg-align"],wrapperStyle:o>0?{width:"calc(100% - "+V(2*o)+")",marginLeft:V(2*o)}:void 0}]},t);else{var u,p;"\\vec"===n.label?(u=Ke.staticSvg("vec",t),p=Ke.svgData.vec[1]):((u=ee(u=Ke.makeOrd({mode:n.mode,text:n.label},t,"textord"))).italic=0,p=u.width,m&&(c+=u.depth)),h=Ke.makeSpan(["accent-body"],[u]);var d="\\textcircled"===n.label;d&&(h.classes.push("accent-full"),c=i.height);var f=o;d||(f-=p/2),h.style.left=V(f),"\\textcircled"===n.label&&(h.style.top=".2em"),h=Ke.makeVList({positionType:"firstBaseline",children:[{type:"elem",elem:i},{type:"kern",size:-c},{type:"elem",elem:h}]},t)}var g=Ke.makeSpan(["mord","accent"],[h],t);return a?(a.children[0]=g,a.height=Math.max(g.height,a.height),a.classes[0]="mord",a):g},_t=function(e,t){var r=e.isStretchy?Vt(e.label):new Tt.MathNode("mo",[Bt(e.label,e.mode)]),n=new Tt.MathNode("mover",[Rt(e.base,t),r]);return n.setAttribute("accent","true"),n},jt=new RegExp(["\\acute","\\grave","\\ddot","\\tilde","\\bar","\\breve","\\check","\\hat","\\vec","\\dot","\\mathring"].map((function(e){return"\\"+e})).join("|"));ot({type:"accent",names:["\\acute","\\grave","\\ddot","\\tilde","\\bar","\\breve","\\check","\\hat","\\vec","\\dot","\\mathring","\\widecheck","\\widehat","\\widetilde","\\overrightarrow","\\overleftarrow","\\Overrightarrow","\\overleftrightarrow","\\overgroup","\\overlinesegment","\\overleftharpoon","\\overrightharpoon"],props:{numArgs:1},handler:function(e,t){var r=lt(t[0]),n=!jt.test(e.funcName),a=!n||"\\widehat"===e.funcName||"\\widetilde"===e.funcName||"\\widecheck"===e.funcName;return{type:"accent",mode:e.parser.mode,label:e.funcName,isStretchy:n,isShifty:a,base:r}},htmlBuilder:Wt,mathmlBuilder:_t}),ot({type:"accent",names:["\\'","\\`","\\^","\\~","\\=","\\u","\\.",'\\"',"\\c","\\r","\\H","\\v","\\textcircled"],props:{numArgs:1,allowedInText:!0,allowedInMath:!0,argTypes:["primitive"]},handler:function(e,t){var r=t[0],n=e.parser.mode;return"math"===n&&(e.parser.settings.reportNonstrict("mathVsTextAccents","LaTeX's accent "+e.funcName+" works only in text mode"),n="text"),{type:"accent",mode:n,label:e.funcName,isStretchy:!1,isShifty:!0,base:r}},htmlBuilder:Wt,mathmlBuilder:_t}),ot({type:"accentUnder",names:["\\underleftarrow","\\underrightarrow","\\underleftrightarrow","\\undergroup","\\underlinesegment","\\utilde"],props:{numArgs:1},handler:function(e,t){var r=e.parser,n=e.funcName,a=t[0];return{type:"accentUnder",mode:r.mode,label:n,base:a}},htmlBuilder:function(e,t){var r=wt(e.base,t),n=Gt(e,t),a="\\utilde"===e.label?.12:0,i=Ke.makeVList({positionType:"top",positionData:r.height,children:[{type:"elem",elem:n,wrapperClasses:["svg-align"]},{type:"kern",size:a},{type:"elem",elem:r}]},t);return Ke.makeSpan(["mord","accentunder"],[i],t)},mathmlBuilder:function(e,t){var r=Vt(e.label),n=new Tt.MathNode("munder",[Rt(e.base,t),r]);return n.setAttribute("accentunder","true"),n}});var $t=function(e){var t=new Tt.MathNode("mpadded",e?[e]:[]);return 
t.setAttribute("width","+0.6em"),t.setAttribute("lspace","0.3em"),t};ot({type:"xArrow",names:["\\xleftarrow","\\xrightarrow","\\xLeftarrow","\\xRightarrow","\\xleftrightarrow","\\xLeftrightarrow","\\xhookleftarrow","\\xhookrightarrow","\\xmapsto","\\xrightharpoondown","\\xrightharpoonup","\\xleftharpoondown","\\xleftharpoonup","\\xrightleftharpoons","\\xleftrightharpoons","\\xlongequal","\\xtwoheadrightarrow","\\xtwoheadleftarrow","\\xtofrom","\\xrightleftarrows","\\xrightequilibrium","\\xleftequilibrium","\\\\cdrightarrow","\\\\cdleftarrow","\\\\cdlongequal"],props:{numArgs:1,numOptionalArgs:1},handler:function(e,t,r){var n=e.parser,a=e.funcName;return{type:"xArrow",mode:n.mode,label:a,body:t[0],below:r[0]}},htmlBuilder:function(e,t){var r,n=t.style,a=t.havingStyle(n.sup()),i=Ke.wrapFragment(wt(e.body,a,t),t),o="\\x"===e.label.slice(0,2)?"x":"cd";i.classes.push(o+"-arrow-pad"),e.below&&(a=t.havingStyle(n.sub()),(r=Ke.wrapFragment(wt(e.below,a,t),t)).classes.push(o+"-arrow-pad"));var s,l=Gt(e,t),h=-t.fontMetrics().axisHeight+.5*l.height,m=-t.fontMetrics().axisHeight-.5*l.height-.111;if((i.depth>.25||"\\xleftequilibrium"===e.label)&&(m-=i.depth),r){var c=-t.fontMetrics().axisHeight+r.height+.5*l.height+.111;s=Ke.makeVList({positionType:"individualShift",children:[{type:"elem",elem:i,shift:m},{type:"elem",elem:l,shift:h},{type:"elem",elem:r,shift:c}]},t)}else s=Ke.makeVList({positionType:"individualShift",children:[{type:"elem",elem:i,shift:m},{type:"elem",elem:l,shift:h}]},t);return s.children[0].children[0].children[1].classes.push("svg-align"),Ke.makeSpan(["mrel","x-arrow"],[s],t)},mathmlBuilder:function(e,t){var r,n=Vt(e.label);if(n.setAttribute("minsize","x"===e.label.charAt(0)?"1.75em":"3.0em"),e.body){var a=$t(Rt(e.body,t));if(e.below){var i=$t(Rt(e.below,t));r=new Tt.MathNode("munderover",[n,i,a])}else r=new Tt.MathNode("mover",[n,a])}else if(e.below){var o=$t(Rt(e.below,t));r=new Tt.MathNode("munder",[n,o])}else r=$t(),r=new Tt.MathNode("mover",[n,r]);return r}});var Zt={">":"\\\\cdrightarrow","<":"\\\\cdleftarrow","=":"\\\\cdlongequal",A:"\\uparrow",V:"\\downarrow","|":"\\Vert",".":"no arrow"},Kt=function(e){return"textord"===e.type&&"@"===e.text};function Jt(e,t,r){var n=Zt[e];switch(n){case"\\\\cdrightarrow":case"\\\\cdleftarrow":return r.callFunction(n,[t[0]],[t[1]]);case"\\uparrow":case"\\downarrow":var a={type:"atom",text:n,mode:"math",family:"rel"},i={type:"ordgroup",mode:"math",body:[r.callFunction("\\\\cdleft",[t[0]],[]),r.callFunction("\\Big",[a],[]),r.callFunction("\\\\cdright",[t[1]],[])]};return r.callFunction("\\\\cdparent",[i],[]);case"\\\\cdlongequal":return r.callFunction("\\\\cdlongequal",[],[]);case"\\Vert":return r.callFunction("\\Big",[{type:"textord",text:"\\Vert",mode:"math"}],[]);default:return{type:"textord",text:" ",mode:"math"}}}ot({type:"cdlabel",names:["\\\\cdleft","\\\\cdright"],props:{numArgs:1},handler:function(e,t){var r=e.parser,n=e.funcName;return{type:"cdlabel",mode:r.mode,side:n.slice(4),label:t[0]}},htmlBuilder:function(e,t){var r=t.havingStyle(t.style.sup()),n=Ke.wrapFragment(wt(e.label,r,t),t);return n.classes.push("cd-label-"+e.side),n.style.bottom=V(.8-n.depth),n.height=0,n.depth=0,n},mathmlBuilder:function(e,t){var r=new Tt.MathNode("mrow",[Rt(e.label,t)]);return(r=new Tt.MathNode("mpadded",[r])).setAttribute("width","0"),"left"===e.side&&r.setAttribute("lspace","-1width"),r.setAttribute("voffset","0.7em"),(r=new 
Tt.MathNode("mstyle",[r])).setAttribute("displaystyle","false"),r.setAttribute("scriptlevel","1"),r}}),ot({type:"cdlabelparent",names:["\\\\cdparent"],props:{numArgs:1},handler:function(e,t){return{type:"cdlabelparent",mode:e.parser.mode,fragment:t[0]}},htmlBuilder:function(e,t){var r=Ke.wrapFragment(wt(e.fragment,t),t);return r.classes.push("cd-vert-arrow"),r},mathmlBuilder:function(e,t){return new Tt.MathNode("mrow",[Rt(e.fragment,t)])}}),ot({type:"textord",names:["\\@char"],props:{numArgs:1,allowedInText:!0},handler:function(e,t){for(var r=e.parser,a=Ut(t[0],"ordgroup").body,i="",o=0;o=1114111)throw new n("\\@char with invalid code point "+i);return l<=65535?s=String.fromCharCode(l):(l-=65536,s=String.fromCharCode(55296+(l>>10),56320+(1023&l))),{type:"textord",mode:r.mode,text:s}}});var Qt=function(e,t){var r=ft(e.body,t.withColor(e.color),!1);return Ke.makeFragment(r)},er=function(e,t){var r=Nt(e.body,t.withColor(e.color)),n=new Tt.MathNode("mstyle",r);return n.setAttribute("mathcolor",e.color),n};ot({type:"color",names:["\\textcolor"],props:{numArgs:2,allowedInText:!0,argTypes:["color","original"]},handler:function(e,t){var r=e.parser,n=Ut(t[0],"color-token").color,a=t[1];return{type:"color",mode:r.mode,color:n,body:ht(a)}},htmlBuilder:Qt,mathmlBuilder:er}),ot({type:"color",names:["\\color"],props:{numArgs:1,allowedInText:!0,argTypes:["color"]},handler:function(e,t){var r=e.parser,n=e.breakOnTokenText,a=Ut(t[0],"color-token").color;r.gullet.macros.set("\\current@color",a);var i=r.parseExpression(!0,n);return{type:"color",mode:r.mode,color:a,body:i}},htmlBuilder:Qt,mathmlBuilder:er}),ot({type:"cr",names:["\\\\"],props:{numArgs:0,numOptionalArgs:1,argTypes:["size"],allowedInText:!0},handler:function(e,t,r){var n=e.parser,a=r[0],i=!n.settings.displayMode||!n.settings.useStrictBehavior("newLineInDisplayMode","In LaTeX, \\\\ or \\newline does nothing in display mode");return{type:"cr",mode:n.mode,newLine:i,size:a&&Ut(a,"size").value}},htmlBuilder:function(e,t){var r=Ke.makeSpan(["mspace"],[],t);return e.newLine&&(r.classes.push("newline"),e.size&&(r.style.marginTop=V(F(e.size,t)))),r},mathmlBuilder:function(e,t){var r=new Tt.MathNode("mspace");return e.newLine&&(r.setAttribute("linebreak","newline"),e.size&&r.setAttribute("height",V(F(e.size,t)))),r}});var tr={"\\global":"\\global","\\long":"\\\\globallong","\\\\globallong":"\\\\globallong","\\def":"\\gdef","\\gdef":"\\gdef","\\edef":"\\xdef","\\xdef":"\\xdef","\\let":"\\\\globallet","\\futurelet":"\\\\globalfuture"},rr=function(e){var t=e.text;if(/^(?:[\\{}$&#^_]|EOF)$/.test(t))throw new n("Expected a control sequence",e);return t},nr=function(e,t,r,n){var a=e.gullet.macros.get(r.text);null==a&&(r.noexpand=!0,a={tokens:[r],numArgs:0,unexpandable:!e.gullet.isExpandable(r.text)}),e.gullet.macros.set(t,a,n)};ot({type:"internal",names:["\\global","\\long","\\\\globallong"],props:{numArgs:0,allowedInText:!0},handler:function(e){var t=e.parser,r=e.funcName;t.consumeSpaces();var a=t.fetch();if(tr[a.text])return"\\global"!==r&&"\\\\globallong"!==r||(a.text=tr[a.text]),Ut(t.parseFunction(),"internal");throw new n("Invalid token after macro prefix",a)}}),ot({type:"internal",names:["\\def","\\gdef","\\edef","\\xdef"],props:{numArgs:0,allowedInText:!0,primitive:!0},handler:function(e){var t=e.parser,r=e.funcName,a=t.gullet.popToken(),i=a.text;if(/^(?:[\\{}$&#^_]|EOF)$/.test(i))throw new n("Expected a control sequence",a);for(var 
o,s=0,l=[[]];"{"!==t.gullet.future().text;)if("#"===(a=t.gullet.popToken()).text){if("{"===t.gullet.future().text){o=t.gullet.future(),l[s].push("{");break}if(a=t.gullet.popToken(),!/^[1-9]$/.test(a.text))throw new n('Invalid argument number "'+a.text+'"');if(parseInt(a.text)!==s+1)throw new n('Argument number "'+a.text+'" out of order');s++,l.push([])}else{if("EOF"===a.text)throw new n("Expected a macro definition");l[s].push(a.text)}var h=t.gullet.consumeArg().tokens;return o&&h.unshift(o),"\\edef"!==r&&"\\xdef"!==r||(h=t.gullet.expandTokens(h)).reverse(),t.gullet.macros.set(i,{tokens:h,numArgs:s,delimiters:l},r===tr[r]),{type:"internal",mode:t.mode}}}),ot({type:"internal",names:["\\let","\\\\globallet"],props:{numArgs:0,allowedInText:!0,primitive:!0},handler:function(e){var t=e.parser,r=e.funcName,n=rr(t.gullet.popToken());t.gullet.consumeSpaces();var a=function(e){var t=e.gullet.popToken();return"="===t.text&&" "===(t=e.gullet.popToken()).text&&(t=e.gullet.popToken()),t}(t);return nr(t,n,a,"\\\\globallet"===r),{type:"internal",mode:t.mode}}}),ot({type:"internal",names:["\\futurelet","\\\\globalfuture"],props:{numArgs:0,allowedInText:!0,primitive:!0},handler:function(e){var t=e.parser,r=e.funcName,n=rr(t.gullet.popToken()),a=t.gullet.popToken(),i=t.gullet.popToken();return nr(t,n,i,"\\\\globalfuture"===r),t.gullet.pushToken(i),t.gullet.pushToken(a),{type:"internal",mode:t.mode}}});var ar=function(e,t,r){var n=q(ae.math[e]&&ae.math[e].replace||e,t,r);if(!n)throw new Error("Unsupported symbol "+e+" and font size "+t+".");return n},ir=function(e,t,r,n){var a=r.havingBaseStyle(t),i=Ke.makeSpan(n.concat(a.sizingClasses(r)),[e],r),o=a.sizeMultiplier/r.sizeMultiplier;return i.height*=o,i.depth*=o,i.maxFontSize=a.sizeMultiplier,i},or=function(e,t,r){var n=t.havingBaseStyle(r),a=(1-t.sizeMultiplier/n.sizeMultiplier)*t.fontMetrics().axisHeight;e.classes.push("delimcenter"),e.style.top=V(a),e.height-=a,e.depth+=a},sr=function(e,t,r,n,a,i){var o=function(e,t,r,n){return Ke.makeSymbol(e,"Size"+t+"-Regular",r,n)}(e,t,a,n),s=ir(Ke.makeSpan(["delimsizing","size"+t],[o],n),x.TEXT,n,i);return r&&or(s,n,x.TEXT),s},lr=function(e,t,r){var n;return n="Size1-Regular"===t?"delim-size1":"delim-size4",{type:"elem",elem:Ke.makeSpan(["delimsizinginner",n],[Ke.makeSpan([],[Ke.makeSymbol(e,t,r)])])}},hr=function(e,t,r){var n=T["Size4-Regular"][e.charCodeAt(0)]?T["Size4-Regular"][e.charCodeAt(0)][4]:T["Size1-Regular"][e.charCodeAt(0)][4],a=new J("inner",function(e,t){switch(e){case"\u239c":return"M291 0 H417 V"+t+" H291z M291 0 H417 V"+t+" H291z";case"\u2223":return"M145 0 H188 V"+t+" H145z M145 0 H188 V"+t+" H145z";case"\u2225":return"M145 0 H188 V"+t+" H145z M145 0 H188 V"+t+" H145zM367 0 H410 V"+t+" H367z M367 0 H410 V"+t+" H367z";case"\u239f":return"M457 0 H583 V"+t+" H457z M457 0 H583 V"+t+" H457z";case"\u23a2":return"M319 0 H403 V"+t+" H319z M319 0 H403 V"+t+" H319z";case"\u23a5":return"M263 0 H347 V"+t+" H263z M263 0 H347 V"+t+" H263z";case"\u23aa":return"M384 0 H504 V"+t+" H384z M384 0 H504 V"+t+" H384z";case"\u23d0":return"M312 0 H355 V"+t+" H312z M312 0 H355 V"+t+" H312z";case"\u2016":return"M257 0 H300 V"+t+" H257z M257 0 H300 V"+t+" H257zM478 0 H521 V"+t+" H478z M478 0 H521 V"+t+" H478z";default:return""}}(e,Math.round(1e3*t))),i=new K([a],{width:V(n),height:V(t),style:"width:"+V(n),viewBox:"0 0 "+1e3*n+" "+Math.round(1e3*t),preserveAspectRatio:"xMinYMin"}),o=Ke.makeSvgSpan([],[i],r);return 
o.height=t,o.style.height=V(t),o.style.width=V(n),{type:"elem",elem:o}},mr={type:"kern",size:-.008},cr=["|","\\lvert","\\rvert","\\vert"],ur=["\\|","\\lVert","\\rVert","\\Vert"],pr=function(e,t,r,n,a,i){var o,s,h,m;o=h=m=e,s=null;var c="Size1-Regular";"\\uparrow"===e?h=m="\u23d0":"\\Uparrow"===e?h=m="\u2016":"\\downarrow"===e?o=h="\u23d0":"\\Downarrow"===e?o=h="\u2016":"\\updownarrow"===e?(o="\\uparrow",h="\u23d0",m="\\downarrow"):"\\Updownarrow"===e?(o="\\Uparrow",h="\u2016",m="\\Downarrow"):l.contains(cr,e)?h="\u2223":l.contains(ur,e)?h="\u2225":"["===e||"\\lbrack"===e?(o="\u23a1",h="\u23a2",m="\u23a3",c="Size4-Regular"):"]"===e||"\\rbrack"===e?(o="\u23a4",h="\u23a5",m="\u23a6",c="Size4-Regular"):"\\lfloor"===e||"\u230a"===e?(h=o="\u23a2",m="\u23a3",c="Size4-Regular"):"\\lceil"===e||"\u2308"===e?(o="\u23a1",h=m="\u23a2",c="Size4-Regular"):"\\rfloor"===e||"\u230b"===e?(h=o="\u23a5",m="\u23a6",c="Size4-Regular"):"\\rceil"===e||"\u2309"===e?(o="\u23a4",h=m="\u23a5",c="Size4-Regular"):"("===e||"\\lparen"===e?(o="\u239b",h="\u239c",m="\u239d",c="Size4-Regular"):")"===e||"\\rparen"===e?(o="\u239e",h="\u239f",m="\u23a0",c="Size4-Regular"):"\\{"===e||"\\lbrace"===e?(o="\u23a7",s="\u23a8",m="\u23a9",h="\u23aa",c="Size4-Regular"):"\\}"===e||"\\rbrace"===e?(o="\u23ab",s="\u23ac",m="\u23ad",h="\u23aa",c="Size4-Regular"):"\\lgroup"===e||"\u27ee"===e?(o="\u23a7",m="\u23a9",h="\u23aa",c="Size4-Regular"):"\\rgroup"===e||"\u27ef"===e?(o="\u23ab",m="\u23ad",h="\u23aa",c="Size4-Regular"):"\\lmoustache"===e||"\u23b0"===e?(o="\u23a7",m="\u23ad",h="\u23aa",c="Size4-Regular"):"\\rmoustache"!==e&&"\u23b1"!==e||(o="\u23ab",m="\u23a9",h="\u23aa",c="Size4-Regular");var u=ar(o,c,a),p=u.height+u.depth,d=ar(h,c,a),f=d.height+d.depth,g=ar(m,c,a),v=g.height+g.depth,b=0,y=1;if(null!==s){var w=ar(s,c,a);b=w.height+w.depth,y=2}var k=p+v+b,S=k+Math.max(0,Math.ceil((t-k)/(y*f)))*y*f,M=n.fontMetrics().axisHeight;r&&(M*=n.sizeMultiplier);var z=S/2-M,A=[];if(A.push(lr(m,c,a)),A.push(mr),null===s){var T=S-p-v+.016;A.push(hr(h,T,n))}else{var B=(S-p-v-b)/2+.016;A.push(hr(h,B,n)),A.push(mr),A.push(lr(s,c,a)),A.push(mr),A.push(hr(h,B,n))}A.push(mr),A.push(lr(o,c,a));var C=n.havingBaseStyle(x.TEXT),q=Ke.makeVList({positionType:"bottom",positionData:z,children:A},C);return ir(Ke.makeSpan(["delimsizing","mult"],[q],C),x.TEXT,n,i)},dr=.08,fr=function(e,t,r,n,a){var i=function(e,t,r){t*=1e3;var n="";switch(e){case"sqrtMain":n=function(e,t){return"M95,"+(622+e+t)+"\nc-2.7,0,-7.17,-2.7,-13.5,-8c-5.8,-5.3,-9.5,-10,-9.5,-14\nc0,-2,0.3,-3.3,1,-4c1.3,-2.7,23.83,-20.7,67.5,-54\nc44.2,-33.3,65.8,-50.3,66.5,-51c1.3,-1.3,3,-2,5,-2c4.7,0,8.7,3.3,12,10\ns173,378,173,378c0.7,0,35.3,-71,104,-213c68.7,-142,137.5,-285,206.5,-429\nc69,-144,104.5,-217.7,106.5,-221\nl"+e/2.075+" -"+e+"\nc5.3,-9.3,12,-14,20,-14\nH400000v"+(40+e)+"H845.2724\ns-225.272,467,-225.272,467s-235,486,-235,486c-2.7,4.7,-9,7,-19,7\nc-6,0,-10,-1,-12,-3s-194,-422,-194,-422s-65,47,-65,47z\nM"+(834+e)+" "+t+"h400000v"+(40+e)+"h-400000z"}(t,M);break;case"sqrtSize1":n=function(e,t){return"M263,"+(601+e+t)+"c0.7,0,18,39.7,52,119\nc34,79.3,68.167,158.7,102.5,238c34.3,79.3,51.8,119.3,52.5,120\nc340,-704.7,510.7,-1060.3,512,-1067\nl"+e/2.084+" 
-"+e+"\nc4.7,-7.3,11,-11,19,-11\nH40000v"+(40+e)+"H1012.3\ns-271.3,567,-271.3,567c-38.7,80.7,-84,175,-136,283c-52,108,-89.167,185.3,-111.5,232\nc-22.3,46.7,-33.8,70.3,-34.5,71c-4.7,4.7,-12.3,7,-23,7s-12,-1,-12,-1\ns-109,-253,-109,-253c-72.7,-168,-109.3,-252,-110,-252c-10.7,8,-22,16.7,-34,26\nc-22,17.3,-33.3,26,-34,26s-26,-26,-26,-26s76,-59,76,-59s76,-60,76,-60z\nM"+(1001+e)+" "+t+"h400000v"+(40+e)+"h-400000z"}(t,M);break;case"sqrtSize2":n=function(e,t){return"M983 "+(10+e+t)+"\nl"+e/3.13+" -"+e+"\nc4,-6.7,10,-10,18,-10 H400000v"+(40+e)+"\nH1013.1s-83.4,268,-264.1,840c-180.7,572,-277,876.3,-289,913c-4.7,4.7,-12.7,7,-24,7\ns-12,0,-12,0c-1.3,-3.3,-3.7,-11.7,-7,-25c-35.3,-125.3,-106.7,-373.3,-214,-744\nc-10,12,-21,25,-33,39s-32,39,-32,39c-6,-5.3,-15,-14,-27,-26s25,-30,25,-30\nc26.7,-32.7,52,-63,76,-91s52,-60,52,-60s208,722,208,722\nc56,-175.3,126.3,-397.3,211,-666c84.7,-268.7,153.8,-488.2,207.5,-658.5\nc53.7,-170.3,84.5,-266.8,92.5,-289.5z\nM"+(1001+e)+" "+t+"h400000v"+(40+e)+"h-400000z"}(t,M);break;case"sqrtSize3":n=function(e,t){return"M424,"+(2398+e+t)+"\nc-1.3,-0.7,-38.5,-172,-111.5,-514c-73,-342,-109.8,-513.3,-110.5,-514\nc0,-2,-10.7,14.3,-32,49c-4.7,7.3,-9.8,15.7,-15.5,25c-5.7,9.3,-9.8,16,-12.5,20\ns-5,7,-5,7c-4,-3.3,-8.3,-7.7,-13,-13s-13,-13,-13,-13s76,-122,76,-122s77,-121,77,-121\ns209,968,209,968c0,-2,84.7,-361.7,254,-1079c169.3,-717.3,254.7,-1077.7,256,-1081\nl"+e/4.223+" -"+e+"c4,-6.7,10,-10,18,-10 H400000\nv"+(40+e)+"H1014.6\ns-87.3,378.7,-272.6,1166c-185.3,787.3,-279.3,1182.3,-282,1185\nc-2,6,-10,9,-24,9\nc-8,0,-12,-0.7,-12,-2z M"+(1001+e)+" "+t+"\nh400000v"+(40+e)+"h-400000z"}(t,M);break;case"sqrtSize4":n=function(e,t){return"M473,"+(2713+e+t)+"\nc339.3,-1799.3,509.3,-2700,510,-2702 l"+e/5.298+" -"+e+"\nc3.3,-7.3,9.3,-11,18,-11 H400000v"+(40+e)+"H1017.7\ns-90.5,478,-276.2,1466c-185.7,988,-279.5,1483,-281.5,1485c-2,6,-10,9,-24,9\nc-8,0,-12,-0.7,-12,-2c0,-1.3,-5.3,-32,-16,-92c-50.7,-293.3,-119.7,-693.3,-207,-1200\nc0,-1.3,-5.3,8.7,-16,30c-10.7,21.3,-21.3,42.7,-32,64s-16,33,-16,33s-26,-26,-26,-26\ns76,-153,76,-153s77,-151,77,-151c0.7,0.7,35.7,202,105,604c67.3,400.7,102,602.7,104,\n606zM"+(1001+e)+" "+t+"h400000v"+(40+e)+"H1017.7z"}(t,M);break;case"sqrtTall":n=function(e,t,r){return"M702 "+(e+t)+"H400000"+(40+e)+"\nH742v"+(r-54-t-e)+"l-4 4-4 4c-.667.7 -2 1.5-4 2.5s-4.167 1.833-6.5 2.5-5.5 1-9.5 1\nh-12l-28-84c-16.667-52-96.667 -294.333-240-727l-212 -643 -85 170\nc-4-3.333-8.333-7.667-13 -13l-13-13l77-155 77-156c66 199.333 139 419.667\n219 661 l218 661zM702 "+t+"H400000v"+(40+e)+"H742z"}(t,M,r)}return n}(e,n,r),o=new J(e,i),s=new K([o],{width:"400em",height:V(t),viewBox:"0 0 400000 "+r,preserveAspectRatio:"xMinYMin slice"});return 
Ke.makeSvgSpan(["hide-tail"],[s],a)},gr=["(","\\lparen",")","\\rparen","[","\\lbrack","]","\\rbrack","\\{","\\lbrace","\\}","\\rbrace","\\lfloor","\\rfloor","\u230a","\u230b","\\lceil","\\rceil","\u2308","\u2309","\\surd"],vr=["\\uparrow","\\downarrow","\\updownarrow","\\Uparrow","\\Downarrow","\\Updownarrow","|","\\|","\\vert","\\Vert","\\lvert","\\rvert","\\lVert","\\rVert","\\lgroup","\\rgroup","\u27ee","\u27ef","\\lmoustache","\\rmoustache","\u23b0","\u23b1"],br=["<",">","\\langle","\\rangle","/","\\backslash","\\lt","\\gt"],yr=[0,1.2,1.8,2.4,3],xr=[{type:"small",style:x.SCRIPTSCRIPT},{type:"small",style:x.SCRIPT},{type:"small",style:x.TEXT},{type:"large",size:1},{type:"large",size:2},{type:"large",size:3},{type:"large",size:4}],wr=[{type:"small",style:x.SCRIPTSCRIPT},{type:"small",style:x.SCRIPT},{type:"small",style:x.TEXT},{type:"stack"}],kr=[{type:"small",style:x.SCRIPTSCRIPT},{type:"small",style:x.SCRIPT},{type:"small",style:x.TEXT},{type:"large",size:1},{type:"large",size:2},{type:"large",size:3},{type:"large",size:4},{type:"stack"}],Sr=function(e){if("small"===e.type)return"Main-Regular";if("large"===e.type)return"Size"+e.size+"-Regular";if("stack"===e.type)return"Size4-Regular";throw new Error("Add support for delim type '"+e.type+"' here.")},Mr=function(e,t,r,n){for(var a=Math.min(2,3-n.style.size);at)return r[a]}return r[r.length-1]},zr=function(e,t,r,n,a,i){var o;"<"===e||"\\lt"===e||"\u27e8"===e?e="\\langle":">"!==e&&"\\gt"!==e&&"\u27e9"!==e||(e="\\rangle"),o=l.contains(br,e)?xr:l.contains(gr,e)?kr:wr;var s=Mr(e,t,o,n);return"small"===s.type?function(e,t,r,n,a,i){var o=Ke.makeSymbol(e,"Main-Regular",a,n),s=ir(o,t,n,i);return r&&or(s,n,t),s}(e,s.style,r,n,a,i):"large"===s.type?sr(e,s.size,r,n,a,i):pr(e,t,r,n,a,i)},Ar={sqrtImage:function(e,t){var r,n,a=t.havingBaseSizing(),i=Mr("\\surd",e*a.sizeMultiplier,kr,a),o=a.sizeMultiplier,s=Math.max(0,t.minRuleThickness-t.fontMetrics().sqrtRuleThickness),l=0,h=0,m=0;return"small"===i.type?(e<1?o=1:e<1.4&&(o=.7),h=(1+s)/o,(r=fr("sqrtMain",l=(1+s+dr)/o,m=1e3+1e3*s+80,s,t)).style.minWidth="0.853em",n=.833/o):"large"===i.type?(m=1080*yr[i.size],h=(yr[i.size]+s)/o,l=(yr[i.size]+s+dr)/o,(r=fr("sqrtSize"+i.size,l,m,s,t)).style.minWidth="1.02em",n=1/o):(l=e+s+dr,h=e+s,m=Math.floor(1e3*e+s)+80,(r=fr("sqrtTall",l,m,s,t)).style.minWidth="0.742em",n=1.056),r.height=h,r.style.height=V(l),{span:r,advanceWidth:n,ruleWidth:(t.fontMetrics().sqrtRuleThickness+s)*o}},sizedDelim:function(e,t,r,a,i){if("<"===e||"\\lt"===e||"\u27e8"===e?e="\\langle":">"!==e&&"\\gt"!==e&&"\u27e9"!==e||(e="\\rangle"),l.contains(gr,e)||l.contains(br,e))return sr(e,t,!1,r,a,i);if(l.contains(vr,e))return pr(e,yr[t],!1,r,a,i);throw new n("Illegal delimiter: '"+e+"'")},sizeToMaxHeight:yr,customSizedDelim:zr,leftRightDelim:function(e,t,r,n,a,i){var o=n.fontMetrics().axisHeight*n.sizeMultiplier,s=5/n.fontMetrics().ptPerEm,l=Math.max(t-o,r+o),h=Math.max(l/500*901,2*l-s);return 
zr(e,h,!0,n,a,i)}},Tr={"\\bigl":{mclass:"mopen",size:1},"\\Bigl":{mclass:"mopen",size:2},"\\biggl":{mclass:"mopen",size:3},"\\Biggl":{mclass:"mopen",size:4},"\\bigr":{mclass:"mclose",size:1},"\\Bigr":{mclass:"mclose",size:2},"\\biggr":{mclass:"mclose",size:3},"\\Biggr":{mclass:"mclose",size:4},"\\bigm":{mclass:"mrel",size:1},"\\Bigm":{mclass:"mrel",size:2},"\\biggm":{mclass:"mrel",size:3},"\\Biggm":{mclass:"mrel",size:4},"\\big":{mclass:"mord",size:1},"\\Big":{mclass:"mord",size:2},"\\bigg":{mclass:"mord",size:3},"\\Bigg":{mclass:"mord",size:4}},Br=["(","\\lparen",")","\\rparen","[","\\lbrack","]","\\rbrack","\\{","\\lbrace","\\}","\\rbrace","\\lfloor","\\rfloor","\u230a","\u230b","\\lceil","\\rceil","\u2308","\u2309","<",">","\\langle","\u27e8","\\rangle","\u27e9","\\lt","\\gt","\\lvert","\\rvert","\\lVert","\\rVert","\\lgroup","\\rgroup","\u27ee","\u27ef","\\lmoustache","\\rmoustache","\u23b0","\u23b1","/","\\backslash","|","\\vert","\\|","\\Vert","\\uparrow","\\Uparrow","\\downarrow","\\Downarrow","\\updownarrow","\\Updownarrow","."];function Cr(e,t){var r=Xt(e);if(r&&l.contains(Br,r.text))return r;throw new n(r?"Invalid delimiter '"+r.text+"' after '"+t.funcName+"'":"Invalid delimiter type '"+e.type+"'",e)}function qr(e){if(!e.body)throw new Error("Bug: The leftright ParseNode wasn't fully parsed.")}ot({type:"delimsizing",names:["\\bigl","\\Bigl","\\biggl","\\Biggl","\\bigr","\\Bigr","\\biggr","\\Biggr","\\bigm","\\Bigm","\\biggm","\\Biggm","\\big","\\Big","\\bigg","\\Bigg"],props:{numArgs:1,argTypes:["primitive"]},handler:function(e,t){var r=Cr(t[0],e);return{type:"delimsizing",mode:e.parser.mode,size:Tr[e.funcName].size,mclass:Tr[e.funcName].mclass,delim:r.text}},htmlBuilder:function(e,t){return"."===e.delim?Ke.makeSpan([e.mclass]):Ar.sizedDelim(e.delim,e.size,t,e.mode,[e.mclass])},mathmlBuilder:function(e){var t=[];"."!==e.delim&&t.push(Bt(e.delim,e.mode));var r=new Tt.MathNode("mo",t);"mopen"===e.mclass||"mclose"===e.mclass?r.setAttribute("fence","true"):r.setAttribute("fence","false"),r.setAttribute("stretchy","true");var n=V(Ar.sizeToMaxHeight[e.size]);return r.setAttribute("minsize",n),r.setAttribute("maxsize",n),r}}),ot({type:"leftright-right",names:["\\right"],props:{numArgs:1,primitive:!0},handler:function(e,t){var r=e.parser.gullet.macros.get("\\current@color");if(r&&"string"!=typeof r)throw new n("\\current@color set to non-string in \\right");return{type:"leftright-right",mode:e.parser.mode,delim:Cr(t[0],e).text,color:r}}}),ot({type:"leftright",names:["\\left"],props:{numArgs:1,primitive:!0},handler:function(e,t){var r=Cr(t[0],e),n=e.parser;++n.leftrightDepth;var a=n.parseExpression(!1);--n.leftrightDepth,n.expect("\\right",!1);var i=Ut(n.parseFunction(),"leftright-right");return{type:"leftright",mode:n.mode,body:a,left:r.text,right:i.delim,rightColor:i.color}},htmlBuilder:function(e,t){qr(e);for(var 
r,n,a=ft(e.body,t,!0,["mopen","mclose"]),i=0,o=0,s=!1,l=0;l-1?"mpadded":"menclose",[Rt(e.body,t)]);switch(e.label){case"\\cancel":n.setAttribute("notation","updiagonalstrike");break;case"\\bcancel":n.setAttribute("notation","downdiagonalstrike");break;case"\\phase":n.setAttribute("notation","phasorangle");break;case"\\sout":n.setAttribute("notation","horizontalstrike");break;case"\\fbox":n.setAttribute("notation","box");break;case"\\angl":n.setAttribute("notation","actuarial");break;case"\\fcolorbox":case"\\colorbox":if(r=t.fontMetrics().fboxsep*t.fontMetrics().ptPerEm,n.setAttribute("width","+"+2*r+"pt"),n.setAttribute("height","+"+2*r+"pt"),n.setAttribute("lspace",r+"pt"),n.setAttribute("voffset",r+"pt"),"\\fcolorbox"===e.label){var a=Math.max(t.fontMetrics().fboxrule,t.minRuleThickness);n.setAttribute("style","border: "+a+"em solid "+String(e.borderColor))}break;case"\\xcancel":n.setAttribute("notation","updiagonalstrike downdiagonalstrike")}return e.backgroundColor&&n.setAttribute("mathbackground",e.backgroundColor),n};ot({type:"enclose",names:["\\colorbox"],props:{numArgs:2,allowedInText:!0,argTypes:["color","text"]},handler:function(e,t,r){var n=e.parser,a=e.funcName,i=Ut(t[0],"color-token").color,o=t[1];return{type:"enclose",mode:n.mode,label:a,backgroundColor:i,body:o}},htmlBuilder:Nr,mathmlBuilder:Ir}),ot({type:"enclose",names:["\\fcolorbox"],props:{numArgs:3,allowedInText:!0,argTypes:["color","color","text"]},handler:function(e,t,r){var n=e.parser,a=e.funcName,i=Ut(t[0],"color-token").color,o=Ut(t[1],"color-token").color,s=t[2];return{type:"enclose",mode:n.mode,label:a,backgroundColor:o,borderColor:i,body:s}},htmlBuilder:Nr,mathmlBuilder:Ir}),ot({type:"enclose",names:["\\fbox"],props:{numArgs:1,argTypes:["hbox"],allowedInText:!0},handler:function(e,t){return{type:"enclose",mode:e.parser.mode,label:"\\fbox",body:t[0]}}}),ot({type:"enclose",names:["\\cancel","\\bcancel","\\xcancel","\\sout","\\phase"],props:{numArgs:1},handler:function(e,t){var r=e.parser,n=e.funcName,a=t[0];return{type:"enclose",mode:r.mode,label:n,body:a}},htmlBuilder:Nr,mathmlBuilder:Ir}),ot({type:"enclose",names:["\\angl"],props:{numArgs:1,argTypes:["hbox"],allowedInText:!1},handler:function(e,t){return{type:"enclose",mode:e.parser.mode,label:"\\angl",body:t[0]}}});var Rr={};function Or(e){for(var t=e.type,r=e.names,n=e.props,a=e.handler,i=e.htmlBuilder,o=e.mathmlBuilder,s={type:t,numArgs:n.numArgs||0,allowedInText:!1,numOptionalArgs:0,handler:a},l=0;l1||!c)&&g.pop(),b.length0&&(y+=.25),m.push({pos:y,isDashed:e[t]})}for(w(o[0]),r=0;r0&&(M<(B+=b)&&(M=B),B=0),e.addJot&&(M+=f),z.height=S,z.depth=M,y+=S,z.pos=y,y+=M+B,h[r]=z,w(o[r+1])}var C,q,N=y/2+t.fontMetrics().axisHeight,I=e.cols||[],R=[],O=[];if(e.tags&&e.tags.some((function(e){return e})))for(r=0;r=s)){var W=void 0;(a>0||e.hskipBeforeAndAfter)&&0!==(W=l.deflt(P.pregap,p))&&((C=Ke.makeSpan(["arraycolsep"],[])).style.width=V(W),R.push(C));var _=[];for(r=0;r0){for(var K=Ke.makeLineSpan("hline",t,c),J=Ke.makeLineSpan("hdashline",t,c),Q=[{type:"elem",elem:h,shift:0}];m.length>0;){var ee=m.pop(),te=ee.pos-N;ee.isDashed?Q.push({type:"elem",elem:J,shift:te}):Q.push({type:"elem",elem:K,shift:te})}h=Ke.makeVList({positionType:"individualShift",children:Q},t)}if(0===O.length)return Ke.makeSpan(["mord"],[h],t);var re=Ke.makeVList({positionType:"individualShift",children:O},t);return re=Ke.makeSpan(["tag"],[re],t),Ke.makeFragment([h,re])},Xr={c:"center ",l:"left ",r:"right "},Wr=function(e,t){for(var r=[],n=new Tt.MathNode("mtd",[],["mtr-glue"]),a=new 
Tt.MathNode("mtd",[],["mml-eqn-num"]),i=0;i0){var p=e.cols,d="",f=!1,g=0,v=p.length;"separator"===p[0].type&&(c+="top ",g=1),"separator"===p[p.length-1].type&&(c+="bottom ",v-=1);for(var b=g;b0?"left ":"",c+=S[S.length-1].length>0?"right ":"";for(var M=1;M-1?"alignat":"align",o="split"===e.envName,s=Gr(e.parser,{cols:a,addJot:!0,autoTag:o?void 0:Vr(e.envName),emptySingleRow:!0,colSeparationType:i,maxNumCols:o?2:void 0,leqno:e.parser.settings.leqno},"display"),l=0,h={type:"ordgroup",mode:e.mode,body:[]};if(t[0]&&"ordgroup"===t[0].type){for(var m="",c=0;c0&&u&&(f=1),a[p]={type:"align",align:d,pregap:f,postgap:0}}return s.colSeparationType=u?"align":"alignat",s};Or({type:"array",names:["array","darray"],props:{numArgs:1},handler:function(e,t){var r=(Xt(t[0])?[t[0]]:Ut(t[0],"ordgroup").body).map((function(e){var t=Yt(e).text;if(-1!=="lcr".indexOf(t))return{type:"align",align:t};if("|"===t)return{type:"separator",separator:"|"};if(":"===t)return{type:"separator",separator:":"};throw new n("Unknown column alignment: "+t,e)})),a={cols:r,hskipBeforeAndAfter:!0,maxNumCols:r.length};return Gr(e.parser,a,Ur(e.envName))},htmlBuilder:Yr,mathmlBuilder:Wr}),Or({type:"array",names:["matrix","pmatrix","bmatrix","Bmatrix","vmatrix","Vmatrix","matrix*","pmatrix*","bmatrix*","Bmatrix*","vmatrix*","Vmatrix*"],props:{numArgs:0},handler:function(e){var t={matrix:null,pmatrix:["(",")"],bmatrix:["[","]"],Bmatrix:["\\{","\\}"],vmatrix:["|","|"],Vmatrix:["\\Vert","\\Vert"]}[e.envName.replace("*","")],r="c",a={hskipBeforeAndAfter:!1,cols:[{type:"align",align:r}]};if("*"===e.envName.charAt(e.envName.length-1)){var i=e.parser;if(i.consumeSpaces(),"["===i.fetch().text){if(i.consume(),i.consumeSpaces(),r=i.fetch().text,-1==="lcr".indexOf(r))throw new n("Expected l or c or r",i.nextToken);i.consume(),i.consumeSpaces(),i.expect("]"),i.consume(),a.cols=[{type:"align",align:r}]}}var o=Gr(e.parser,a,Ur(e.envName)),s=Math.max.apply(Math,[0].concat(o.body.map((function(e){return e.length}))));return o.cols=new Array(s).fill({type:"align",align:r}),t?{type:"leftright",mode:e.mode,body:[o],left:t[0],right:t[1],rightColor:void 0}:o},htmlBuilder:Yr,mathmlBuilder:Wr}),Or({type:"array",names:["smallmatrix"],props:{numArgs:0},handler:function(e){var t=Gr(e.parser,{arraystretch:.5},"script");return t.colSeparationType="small",t},htmlBuilder:Yr,mathmlBuilder:Wr}),Or({type:"array",names:["subarray"],props:{numArgs:1},handler:function(e,t){var r=(Xt(t[0])?[t[0]]:Ut(t[0],"ordgroup").body).map((function(e){var t=Yt(e).text;if(-1!=="lc".indexOf(t))return{type:"align",align:t};throw new n("Unknown column alignment: "+t,e)}));if(r.length>1)throw new n("{subarray} can contain only one column");var a={cols:r,hskipBeforeAndAfter:!1,arraystretch:.5};if((a=Gr(e.parser,a,"script")).body.length>0&&a.body[0].length>1)throw new n("{subarray} can contain only one column");return a},htmlBuilder:Yr,mathmlBuilder:Wr}),Or({type:"array",names:["cases","dcases","rcases","drcases"],props:{numArgs:0},handler:function(e){var t=Gr(e.parser,{arraystretch:1.2,cols:[{type:"align",align:"l",pregap:0,postgap:1},{type:"align",align:"l",pregap:0,postgap:0}]},Ur(e.envName));return{type:"leftright",mode:e.mode,body:[t],left:e.envName.indexOf("r")>-1?".":"\\{",right:e.envName.indexOf("r")>-1?"\\}":".",rightColor:void 
0}},htmlBuilder:Yr,mathmlBuilder:Wr}),Or({type:"array",names:["align","align*","aligned","split"],props:{numArgs:0},handler:_r,htmlBuilder:Yr,mathmlBuilder:Wr}),Or({type:"array",names:["gathered","gather","gather*"],props:{numArgs:0},handler:function(e){l.contains(["gather","gather*"],e.envName)&&Fr(e);var t={cols:[{type:"align",align:"c"}],addJot:!0,colSeparationType:"gather",autoTag:Vr(e.envName),emptySingleRow:!0,leqno:e.parser.settings.leqno};return Gr(e.parser,t,"display")},htmlBuilder:Yr,mathmlBuilder:Wr}),Or({type:"array",names:["alignat","alignat*","alignedat"],props:{numArgs:1},handler:_r,htmlBuilder:Yr,mathmlBuilder:Wr}),Or({type:"array",names:["equation","equation*"],props:{numArgs:0},handler:function(e){Fr(e);var t={autoTag:Vr(e.envName),emptySingleRow:!0,singleRow:!0,maxNumCols:1,leqno:e.parser.settings.leqno};return Gr(e.parser,t,"display")},htmlBuilder:Yr,mathmlBuilder:Wr}),Or({type:"array",names:["CD"],props:{numArgs:0},handler:function(e){return Fr(e),function(e){var t=[];for(e.gullet.beginGroup(),e.gullet.macros.set("\\cr","\\\\\\relax"),e.gullet.beginGroup();;){t.push(e.parseExpression(!1,"\\\\")),e.gullet.endGroup(),e.gullet.beginGroup();var r=e.fetch().text;if("&"!==r&&"\\\\"!==r){if("\\end"===r){0===t[t.length-1].length&&t.pop();break}throw new n("Expected \\\\ or \\cr or \\end",e.nextToken)}e.consume()}for(var a,i,o=[],s=[o],l=0;l-1);else{if(!("<>AV".indexOf(u)>-1))throw new n('Expected one of "<>AV=|." after @',h[c]);for(var d=0;d<2;d++){for(var f=!0,g=c+1;g=x.SCRIPT.id?r.text():x.DISPLAY:"text"===e&&r.size===x.DISPLAY.size?r=x.TEXT:"script"===e?r=x.SCRIPT:"scriptscript"===e&&(r=x.SCRIPTSCRIPT),r},nn=function(e,t){var r,n=rn(e.size,t.style),a=n.fracNum(),i=n.fracDen();r=t.havingStyle(a);var o=wt(e.numer,r,t);if(e.continued){var s=8.5/t.fontMetrics().ptPerEm,l=3.5/t.fontMetrics().ptPerEm;o.height=o.height0?3*c:7*c,d=t.fontMetrics().denom1):(m>0?(u=t.fontMetrics().num2,p=c):(u=t.fontMetrics().num3,p=3*c),d=t.fontMetrics().denom2),h){var w=t.fontMetrics().axisHeight;u-o.depth-(w+.5*m)0&&(t="."===(t=e)?null:t),t};ot({type:"genfrac",names:["\\genfrac"],props:{numArgs:6,allowedInArgument:!0,argTypes:["math","math","size","text","math","math"]},handler:function(e,t){var r,n=e.parser,a=t[4],i=t[5],o=lt(t[0]),s="atom"===o.type&&"open"===o.family?sn(o.text):null,l=lt(t[1]),h="atom"===l.type&&"close"===l.family?sn(l.text):null,m=Ut(t[2],"size"),c=null;r=!!m.isBlank||(c=m.value).number>0;var u="auto",p=t[3];if("ordgroup"===p.type){if(p.body.length>0){var d=Ut(p.body[0],"textord");u=on[Number(d.text)]}}else p=Ut(p,"textord"),u=on[Number(p.text)];return{type:"genfrac",mode:n.mode,numer:a,denom:i,continued:!1,hasBarLine:r,barSize:c,leftDelim:s,rightDelim:h,size:u}},htmlBuilder:nn,mathmlBuilder:an}),ot({type:"infix",names:["\\above"],props:{numArgs:1,argTypes:["size"],infix:!0},handler:function(e,t){var r=e.parser,n=(e.funcName,e.token);return{type:"infix",mode:r.mode,replaceWith:"\\\\abovefrac",size:Ut(t[0],"size").value,token:n}}}),ot({type:"genfrac",names:["\\\\abovefrac"],props:{numArgs:3,argTypes:["math","size","math"]},handler:function(e,t){var r=e.parser,n=(e.funcName,t[0]),a=function(e){if(!e)throw new Error("Expected non-null, but got "+String(e));return e}(Ut(t[1],"infix").size),i=t[2],o=a.number>0;return{type:"genfrac",mode:r.mode,numer:n,denom:i,continued:!1,hasBarLine:o,barSize:a,leftDelim:null,rightDelim:null,size:"auto"}},htmlBuilder:nn,mathmlBuilder:an});var ln=function(e,t){var 
r,n,a=t.style;"supsub"===e.type?(r=e.sup?wt(e.sup,t.havingStyle(a.sup()),t):wt(e.sub,t.havingStyle(a.sub()),t),n=Ut(e.base,"horizBrace")):n=Ut(e,"horizBrace");var i,o=wt(n.base,t.havingBaseStyle(x.DISPLAY)),s=Gt(n,t);if(n.isOver?(i=Ke.makeVList({positionType:"firstBaseline",children:[{type:"elem",elem:o},{type:"kern",size:.1},{type:"elem",elem:s}]},t)).children[0].children[0].children[1].classes.push("svg-align"):(i=Ke.makeVList({positionType:"bottom",positionData:o.depth+.1+s.height,children:[{type:"elem",elem:s},{type:"kern",size:.1},{type:"elem",elem:o}]},t)).children[0].children[0].children[0].classes.push("svg-align"),r){var l=Ke.makeSpan(["mord",n.isOver?"mover":"munder"],[i],t);i=n.isOver?Ke.makeVList({positionType:"firstBaseline",children:[{type:"elem",elem:l},{type:"kern",size:.2},{type:"elem",elem:r}]},t):Ke.makeVList({positionType:"bottom",positionData:l.depth+.2+r.height+r.depth,children:[{type:"elem",elem:r},{type:"kern",size:.2},{type:"elem",elem:l}]},t)}return Ke.makeSpan(["mord",n.isOver?"mover":"munder"],[i],t)};ot({type:"horizBrace",names:["\\overbrace","\\underbrace"],props:{numArgs:1},handler:function(e,t){var r=e.parser,n=e.funcName;return{type:"horizBrace",mode:r.mode,label:n,isOver:/^\\over/.test(n),base:t[0]}},htmlBuilder:ln,mathmlBuilder:function(e,t){var r=Vt(e.label);return new Tt.MathNode(e.isOver?"mover":"munder",[Rt(e.base,t),r])}}),ot({type:"href",names:["\\href"],props:{numArgs:2,argTypes:["url","original"],allowedInText:!0},handler:function(e,t){var r=e.parser,n=t[1],a=Ut(t[0],"url").url;return r.settings.isTrusted({command:"\\href",url:a})?{type:"href",mode:r.mode,href:a,body:ht(n)}:r.formatUnsupportedCmd("\\href")},htmlBuilder:function(e,t){var r=ft(e.body,t,!1);return Ke.makeAnchor(e.href,[],r,t)},mathmlBuilder:function(e,t){var r=It(e.body,t);return r instanceof zt||(r=new zt("mrow",[r])),r.setAttribute("href",e.href),r}}),ot({type:"href",names:["\\url"],props:{numArgs:1,argTypes:["url"],allowedInText:!0},handler:function(e,t){var r=e.parser,n=Ut(t[0],"url").url;if(!r.settings.isTrusted({command:"\\url",url:n}))return r.formatUnsupportedCmd("\\url");for(var a=[],i=0;i0&&(n=F(e.totalheight,t)-r);var a=0;e.width.number>0&&(a=F(e.width,t));var i={height:V(r+n)};a>0&&(i.width=V(a)),n>0&&(i.verticalAlign=V(-n));var o=new j(e.src,e.alt,i);return o.height=r,o.depth=n,o},mathmlBuilder:function(e,t){var r=new Tt.MathNode("mglyph",[]);r.setAttribute("alt",e.alt);var n=F(e.height,t),a=0;if(e.totalheight.number>0&&(a=F(e.totalheight,t)-n,r.setAttribute("valign",V(-a))),r.setAttribute("height",V(n+a)),e.width.number>0){var i=F(e.width,t);r.setAttribute("width",V(i))}return r.setAttribute("src",e.src),r}}),ot({type:"kern",names:["\\kern","\\mkern","\\hskip","\\mskip"],props:{numArgs:1,argTypes:["size"],primitive:!0,allowedInText:!0},handler:function(e,t){var r=e.parser,n=e.funcName,a=Ut(t[0],"size");if(r.settings.strict){var i="m"===n[1],o="mu"===a.value.unit;i?(o||r.settings.reportNonstrict("mathVsTextUnits","LaTeX's "+n+" supports only mu units, not "+a.value.unit+" units"),"math"!==r.mode&&r.settings.reportNonstrict("mathVsTextUnits","LaTeX's "+n+" works only in math mode")):o&&r.settings.reportNonstrict("mathVsTextUnits","LaTeX's "+n+" doesn't support mu units")}return{type:"kern",mode:r.mode,dimension:a.value}},htmlBuilder:function(e,t){return Ke.makeGlue(e.dimension,t)},mathmlBuilder:function(e,t){var r=F(e.dimension,t);return new 
Tt.SpaceNode(r)}}),ot({type:"lap",names:["\\mathllap","\\mathrlap","\\mathclap"],props:{numArgs:1,allowedInText:!0},handler:function(e,t){var r=e.parser,n=e.funcName,a=t[0];return{type:"lap",mode:r.mode,alignment:n.slice(5),body:a}},htmlBuilder:function(e,t){var r;"clap"===e.alignment?(r=Ke.makeSpan([],[wt(e.body,t)]),r=Ke.makeSpan(["inner"],[r],t)):r=Ke.makeSpan(["inner"],[wt(e.body,t)]);var n=Ke.makeSpan(["fix"],[]),a=Ke.makeSpan([e.alignment],[r,n],t),i=Ke.makeSpan(["strut"]);return i.style.height=V(a.height+a.depth),a.depth&&(i.style.verticalAlign=V(-a.depth)),a.children.unshift(i),a=Ke.makeSpan(["thinbox"],[a],t),Ke.makeSpan(["mord","vbox"],[a],t)},mathmlBuilder:function(e,t){var r=new Tt.MathNode("mpadded",[Rt(e.body,t)]);if("rlap"!==e.alignment){var n="llap"===e.alignment?"-1":"-0.5";r.setAttribute("lspace",n+"width")}return r.setAttribute("width","0px"),r}}),ot({type:"styling",names:["\\(","$"],props:{numArgs:0,allowedInText:!0,allowedInMath:!1},handler:function(e,t){var r=e.funcName,n=e.parser,a=n.mode;n.switchMode("math");var i="\\("===r?"\\)":"$",o=n.parseExpression(!1,i);return n.expect(i),n.switchMode(a),{type:"styling",mode:n.mode,style:"text",body:o}}}),ot({type:"text",names:["\\)","\\]"],props:{numArgs:0,allowedInText:!0,allowedInMath:!1},handler:function(e,t){throw new n("Mismatched "+e.funcName)}});var mn=function(e,t){switch(t.style.size){case x.DISPLAY.size:return e.display;case x.TEXT.size:return e.text;case x.SCRIPT.size:return e.script;case x.SCRIPTSCRIPT.size:return e.scriptscript;default:return e.text}};ot({type:"mathchoice",names:["\\mathchoice"],props:{numArgs:4,primitive:!0},handler:function(e,t){return{type:"mathchoice",mode:e.parser.mode,display:ht(t[0]),text:ht(t[1]),script:ht(t[2]),scriptscript:ht(t[3])}},htmlBuilder:function(e,t){var r=mn(e,t),n=ft(r,t,!1);return Ke.makeFragment(n)},mathmlBuilder:function(e,t){var r=mn(e,t);return It(r,t)}});var cn=function(e,t,r,n,a,i,o){e=Ke.makeSpan([],[e]);var s,h,m,c=r&&l.isCharacterBox(r);if(t){var u=wt(t,n.havingStyle(a.sup()),n);h={elem:u,kern:Math.max(n.fontMetrics().bigOpSpacing1,n.fontMetrics().bigOpSpacing3-u.depth)}}if(r){var p=wt(r,n.havingStyle(a.sub()),n);s={elem:p,kern:Math.max(n.fontMetrics().bigOpSpacing2,n.fontMetrics().bigOpSpacing4-p.height)}}if(h&&s){var d=n.fontMetrics().bigOpSpacing5+s.elem.height+s.elem.depth+s.kern+e.depth+o;m=Ke.makeVList({positionType:"bottom",positionData:d,children:[{type:"kern",size:n.fontMetrics().bigOpSpacing5},{type:"elem",elem:s.elem,marginLeft:V(-i)},{type:"kern",size:s.kern},{type:"elem",elem:e},{type:"kern",size:h.kern},{type:"elem",elem:h.elem,marginLeft:V(i)},{type:"kern",size:n.fontMetrics().bigOpSpacing5}]},n)}else if(s){var f=e.height-o;m=Ke.makeVList({positionType:"top",positionData:f,children:[{type:"kern",size:n.fontMetrics().bigOpSpacing5},{type:"elem",elem:s.elem,marginLeft:V(-i)},{type:"kern",size:s.kern},{type:"elem",elem:e}]},n)}else{if(!h)return e;var g=e.depth+o;m=Ke.makeVList({positionType:"bottom",positionData:g,children:[{type:"elem",elem:e},{type:"kern",size:h.kern},{type:"elem",elem:h.elem,marginLeft:V(i)},{type:"kern",size:n.fontMetrics().bigOpSpacing5}]},n)}var v=[m];if(s&&0!==i&&!c){var b=Ke.makeSpan(["mspace"],[],n);b.style.marginRight=V(i),v.unshift(b)}return Ke.makeSpan(["mop","op-limits"],v,n)},un=["\\smallint"],pn=function(e,t){var r,n,a,i=!1;"supsub"===e.type?(r=e.sup,n=e.sub,a=Ut(e.base,"op"),i=!0):a=Ut(e,"op");var o,s=t.style,h=!1;if(s.size===x.DISPLAY.size&&a.symbol&&!l.contains(un,a.name)&&(h=!0),a.symbol){var 
m=h?"Size2-Regular":"Size1-Regular",c="";if("\\oiint"!==a.name&&"\\oiiint"!==a.name||(c=a.name.substr(1),a.name="oiint"===c?"\\iint":"\\iiint"),o=Ke.makeSymbol(a.name,m,"math",t,["mop","op-symbol",h?"large-op":"small-op"]),c.length>0){var u=o.italic,p=Ke.staticSvg(c+"Size"+(h?"2":"1"),t);o=Ke.makeVList({positionType:"individualShift",children:[{type:"elem",elem:o,shift:0},{type:"elem",elem:p,shift:h?.08:0}]},t),a.name="\\"+c,o.classes.unshift("mop"),o.italic=u}}else if(a.body){var d=ft(a.body,t,!0);1===d.length&&d[0]instanceof Z?(o=d[0]).classes[0]="mop":o=Ke.makeSpan(["mop"],d,t)}else{for(var f=[],g=1;g0){for(var s=a.body.map((function(e){var t=e.text;return"string"==typeof t?{type:"textord",mode:e.mode,text:t}:e})),l=ft(s,t.withFont("mathrm"),!0),h=0;h=0?s.setAttribute("height",V(a)):(s.setAttribute("height",V(a)),s.setAttribute("depth",V(-a))),s.setAttribute("voffset",V(a)),s}});var yn=["\\tiny","\\sixptsize","\\scriptsize","\\footnotesize","\\small","\\normalsize","\\large","\\Large","\\LARGE","\\huge","\\Huge"];ot({type:"sizing",names:yn,props:{numArgs:0,allowedInText:!0},handler:function(e,t){var r=e.breakOnTokenText,n=e.funcName,a=e.parser,i=a.parseExpression(!1,r);return{type:"sizing",mode:a.mode,size:yn.indexOf(n)+1,body:i}},htmlBuilder:function(e,t){var r=t.havingSize(e.size);return bn(e.body,r,t)},mathmlBuilder:function(e,t){var r=t.havingSize(e.size),n=Nt(e.body,r),a=new Tt.MathNode("mstyle",n);return a.setAttribute("mathsize",V(r.sizeMultiplier)),a}}),ot({type:"smash",names:["\\smash"],props:{numArgs:1,numOptionalArgs:1,allowedInText:!0},handler:function(e,t,r){var n=e.parser,a=!1,i=!1,o=r[0]&&Ut(r[0],"ordgroup");if(o)for(var s="",l=0;lr.height+r.depth+i&&(i=(i+c-r.height-r.depth)/2);var u=l.height-r.height-i-h;r.style.paddingLeft=V(m);var p=Ke.makeVList({positionType:"firstBaseline",children:[{type:"elem",elem:r,wrapperClasses:["svg-align"]},{type:"kern",size:-(r.height+u)},{type:"elem",elem:l},{type:"kern",size:h}]},t);if(e.index){var d=t.havingStyle(x.SCRIPTSCRIPT),f=wt(e.index,d,t),g=.6*(p.height-p.depth),v=Ke.makeVList({positionType:"shift",positionData:-g,children:[{type:"elem",elem:f}]},t),b=Ke.makeSpan(["root"],[v]);return Ke.makeSpan(["mord","sqrt"],[b,p],t)}return Ke.makeSpan(["mord","sqrt"],[p],t)},mathmlBuilder:function(e,t){var r=e.body,n=e.index;return n?new Tt.MathNode("mroot",[Rt(r,t),Rt(n,t)]):new Tt.MathNode("msqrt",[Rt(r,t)])}});var xn={display:x.DISPLAY,text:x.TEXT,script:x.SCRIPT,scriptscript:x.SCRIPTSCRIPT};ot({type:"styling",names:["\\displaystyle","\\textstyle","\\scriptstyle","\\scriptscriptstyle"],props:{numArgs:0,allowedInText:!0,primitive:!0},handler:function(e,t){var r=e.breakOnTokenText,n=e.funcName,a=e.parser,i=a.parseExpression(!0,r),o=n.slice(1,n.length-5);return{type:"styling",mode:a.mode,style:o,body:i}},htmlBuilder:function(e,t){var r=xn[e.style],n=t.havingStyle(r).withFont("");return bn(e.body,n,t)},mathmlBuilder:function(e,t){var r=xn[e.style],n=t.havingStyle(r),a=Nt(e.body,n),i=new Tt.MathNode("mstyle",a),o={display:["0","true"],text:["0","false"],script:["1","false"],scriptscript:["2","false"]}[e.style];return i.setAttribute("scriptlevel",o[0]),i.setAttribute("displaystyle",o[1]),i}});var wn=function(e,t){var r=e.base;return 
r?"op"===r.type?r.limits&&(t.style.size===x.DISPLAY.size||r.alwaysHandleSupSub)?pn:null:"operatorname"===r.type?r.alwaysHandleSupSub&&(t.style.size===x.DISPLAY.size||r.limits)?vn:null:"accent"===r.type?l.isCharacterBox(r.base)?Wt:null:"horizBrace"===r.type&&!e.sub===r.isOver?ln:null:null};st({type:"supsub",htmlBuilder:function(e,t){var r=wn(e,t);if(r)return r(e,t);var n,a,i,o=e.base,s=e.sup,h=e.sub,m=wt(o,t),c=t.fontMetrics(),u=0,p=0,d=o&&l.isCharacterBox(o);if(s){var f=t.havingStyle(t.style.sup());n=wt(s,f,t),d||(u=m.height-f.fontMetrics().supDrop*f.sizeMultiplier/t.sizeMultiplier)}if(h){var g=t.havingStyle(t.style.sub());a=wt(h,g,t),d||(p=m.depth+g.fontMetrics().subDrop*g.sizeMultiplier/t.sizeMultiplier)}i=t.style===x.DISPLAY?c.sup1:t.style.cramped?c.sup3:c.sup2;var v,b=t.sizeMultiplier,y=V(.5/c.ptPerEm/b),w=null;if(a){var k=e.base&&"op"===e.base.type&&e.base.name&&("\\oiint"===e.base.name||"\\oiiint"===e.base.name);(m instanceof Z||k)&&(w=V(-m.italic))}if(n&&a){u=Math.max(u,i,n.depth+.25*c.xHeight),p=Math.max(p,c.sub2);var S=4*c.defaultRuleThickness;if(u-n.depth-(a.height-p)0&&(u+=M,p-=M)}var z=[{type:"elem",elem:a,shift:p,marginRight:y,marginLeft:w},{type:"elem",elem:n,shift:-u,marginRight:y}];v=Ke.makeVList({positionType:"individualShift",children:z},t)}else if(a){p=Math.max(p,c.sub1,a.height-.8*c.xHeight);var A=[{type:"elem",elem:a,marginLeft:w,marginRight:y}];v=Ke.makeVList({positionType:"shift",positionData:p,children:A},t)}else{if(!n)throw new Error("supsub must have either sup or sub.");u=Math.max(u,i,n.depth+.25*c.xHeight),v=Ke.makeVList({positionType:"shift",positionData:-u,children:[{type:"elem",elem:n,marginRight:y}]},t)}var T=yt(m,"right")||"mord";return Ke.makeSpan([T],[m,Ke.makeSpan(["msupsub"],[v])],t)},mathmlBuilder:function(e,t){var r,n=!1;e.base&&"horizBrace"===e.base.type&&!!e.sup===e.base.isOver&&(n=!0,r=e.base.isOver),!e.base||"op"!==e.base.type&&"operatorname"!==e.base.type||(e.base.parentIsSupSub=!0);var a,i=[Rt(e.base,t)];if(e.sub&&i.push(Rt(e.sub,t)),e.sup&&i.push(Rt(e.sup,t)),n)a=r?"mover":"munder";else if(e.sub)if(e.sup){var o=e.base;a=o&&"op"===o.type&&o.limits&&t.style===x.DISPLAY||o&&"operatorname"===o.type&&o.alwaysHandleSupSub&&(t.style===x.DISPLAY||o.limits)?"munderover":"msubsup"}else{var s=e.base;a=s&&"op"===s.type&&s.limits&&(t.style===x.DISPLAY||s.alwaysHandleSupSub)||s&&"operatorname"===s.type&&s.alwaysHandleSupSub&&(s.limits||t.style===x.DISPLAY)?"munder":"msub"}else{var l=e.base;a=l&&"op"===l.type&&l.limits&&(t.style===x.DISPLAY||l.alwaysHandleSupSub)||l&&"operatorname"===l.type&&l.alwaysHandleSupSub&&(l.limits||t.style===x.DISPLAY)?"mover":"msup"}return new Tt.MathNode(a,i)}}),st({type:"atom",htmlBuilder:function(e,t){return Ke.mathsym(e.text,e.mode,t,["m"+e.family])},mathmlBuilder:function(e,t){var r=new Tt.MathNode("mo",[Bt(e.text,e.mode)]);if("bin"===e.family){var n=qt(e,t);"bold-italic"===n&&r.setAttribute("mathvariant",n)}else"punct"===e.family?r.setAttribute("separator","true"):"open"!==e.family&&"close"!==e.family||r.setAttribute("stretchy","false");return r}});var kn={mi:"italic",mn:"normal",mtext:"normal"};st({type:"mathord",htmlBuilder:function(e,t){return Ke.makeOrd(e,t,"mathord")},mathmlBuilder:function(e,t){var r=new Tt.MathNode("mi",[Bt(e.text,e.mode,t)]),n=qt(e,t)||"italic";return n!==kn[r.type]&&r.setAttribute("mathvariant",n),r}}),st({type:"textord",htmlBuilder:function(e,t){return Ke.makeOrd(e,t,"textord")},mathmlBuilder:function(e,t){var r,n=Bt(e.text,e.mode,t),a=qt(e,t)||"normal";return r="text"===e.mode?new 
Tt.MathNode("mtext",[n]):/[0-9]/.test(e.text)?new Tt.MathNode("mn",[n]):"\\prime"===e.text?new Tt.MathNode("mo",[n]):new Tt.MathNode("mi",[n]),a!==kn[r.type]&&r.setAttribute("mathvariant",a),r}});var Sn={"\\nobreak":"nobreak","\\allowbreak":"allowbreak"},Mn={" ":{},"\\ ":{},"~":{className:"nobreak"},"\\space":{},"\\nobreakspace":{className:"nobreak"}};st({type:"spacing",htmlBuilder:function(e,t){if(Mn.hasOwnProperty(e.text)){var r=Mn[e.text].className||"";if("text"===e.mode){var a=Ke.makeOrd(e,t,"textord");return a.classes.push(r),a}return Ke.makeSpan(["mspace",r],[Ke.mathsym(e.text,e.mode,t)],t)}if(Sn.hasOwnProperty(e.text))return Ke.makeSpan(["mspace",Sn[e.text]],[],t);throw new n('Unknown type of space "'+e.text+'"')},mathmlBuilder:function(e,t){if(!Mn.hasOwnProperty(e.text)){if(Sn.hasOwnProperty(e.text))return new Tt.MathNode("mspace");throw new n('Unknown type of space "'+e.text+'"')}return new Tt.MathNode("mtext",[new Tt.TextNode("\xa0")])}});var zn=function(){var e=new Tt.MathNode("mtd",[]);return e.setAttribute("width","50%"),e};st({type:"tag",mathmlBuilder:function(e,t){var r=new Tt.MathNode("mtable",[new Tt.MathNode("mtr",[zn(),new Tt.MathNode("mtd",[It(e.body,t)]),zn(),new Tt.MathNode("mtd",[It(e.tag,t)])])]);return r.setAttribute("width","100%"),r}});var An={"\\text":void 0,"\\textrm":"textrm","\\textsf":"textsf","\\texttt":"texttt","\\textnormal":"textrm"},Tn={"\\textbf":"textbf","\\textmd":"textmd"},Bn={"\\textit":"textit","\\textup":"textup"},Cn=function(e,t){var r=e.font;return r?An[r]?t.withTextFontFamily(An[r]):Tn[r]?t.withTextFontWeight(Tn[r]):t.withTextFontShape(Bn[r]):t};ot({type:"text",names:["\\text","\\textrm","\\textsf","\\texttt","\\textnormal","\\textbf","\\textmd","\\textit","\\textup"],props:{numArgs:1,argTypes:["text"],allowedInArgument:!0,allowedInText:!0},handler:function(e,t){var r=e.parser,n=e.funcName,a=t[0];return{type:"text",mode:r.mode,body:ht(a),font:n}},htmlBuilder:function(e,t){var r=Cn(e,t),n=ft(e.body,r,!0);return Ke.makeSpan(["mord","text"],n,r)},mathmlBuilder:function(e,t){var r=Cn(e,t);return It(e.body,r)}}),ot({type:"underline",names:["\\underline"],props:{numArgs:1,allowedInText:!0},handler:function(e,t){return{type:"underline",mode:e.parser.mode,body:t[0]}},htmlBuilder:function(e,t){var r=wt(e.body,t),n=Ke.makeLineSpan("underline-line",t),a=t.fontMetrics().defaultRuleThickness,i=Ke.makeVList({positionType:"top",positionData:r.height,children:[{type:"kern",size:a},{type:"elem",elem:n},{type:"kern",size:3*a},{type:"elem",elem:r}]},t);return Ke.makeSpan(["mord","underline"],[i],t)},mathmlBuilder:function(e,t){var r=new Tt.MathNode("mo",[new Tt.TextNode("\u203e")]);r.setAttribute("stretchy","true");var n=new Tt.MathNode("munder",[Rt(e.body,t),r]);return n.setAttribute("accentunder","true"),n}}),ot({type:"vcenter",names:["\\vcenter"],props:{numArgs:1,argTypes:["original"],allowedInText:!1},handler:function(e,t){return{type:"vcenter",mode:e.parser.mode,body:t[0]}},htmlBuilder:function(e,t){var r=wt(e.body,t),n=t.fontMetrics().axisHeight,a=.5*(r.height-n-(r.depth+n));return Ke.makeVList({positionType:"shift",positionData:a,children:[{type:"elem",elem:r}]},t)},mathmlBuilder:function(e,t){return new Tt.MathNode("mpadded",[Rt(e.body,t)],["vcenter"])}}),ot({type:"verb",names:["\\verb"],props:{numArgs:0,allowedInText:!0},handler:function(e,t,r){throw new n("\\verb ended by end of line instead of matching delimiter")},htmlBuilder:function(e,t){for(var r=qn(e),n=[],a=t.havingStyle(t.style.text()),i=0;i0;)this.endGroup()},t.has=function(e){return 
this.current.hasOwnProperty(e)||this.builtins.hasOwnProperty(e)},t.get=function(e){return this.current.hasOwnProperty(e)?this.current[e]:this.builtins[e]},t.set=function(e,t,r){if(void 0===r&&(r=!1),r){for(var n=0;n0&&(this.undefStack[this.undefStack.length-1][e]=t)}else{var a=this.undefStack[this.undefStack.length-1];a&&!a.hasOwnProperty(e)&&(a[e]=this.current[e])}null==t?delete this.current[e]:this.current[e]=t},e}(),Hn=Hr;Er("\\noexpand",(function(e){var t=e.popToken();return e.isExpandable(t.text)&&(t.noexpand=!0,t.treatAsRelax=!0),{tokens:[t],numArgs:0}})),Er("\\expandafter",(function(e){var t=e.popToken();return e.expandOnce(!0),{tokens:[t],numArgs:0}})),Er("\\@firstoftwo",(function(e){return{tokens:e.consumeArgs(2)[0],numArgs:0}})),Er("\\@secondoftwo",(function(e){return{tokens:e.consumeArgs(2)[1],numArgs:0}})),Er("\\@ifnextchar",(function(e){var t=e.consumeArgs(3);e.consumeSpaces();var r=e.future();return 1===t[0].length&&t[0][0].text===r.text?{tokens:t[1],numArgs:0}:{tokens:t[2],numArgs:0}})),Er("\\@ifstar","\\@ifnextchar *{\\@firstoftwo{#1}}"),Er("\\TextOrMath",(function(e){var t=e.consumeArgs(2);return"text"===e.mode?{tokens:t[0],numArgs:0}:{tokens:t[1],numArgs:0}}));var En={0:0,1:1,2:2,3:3,4:4,5:5,6:6,7:7,8:8,9:9,a:10,A:10,b:11,B:11,c:12,C:12,d:13,D:13,e:14,E:14,f:15,F:15};Er("\\char",(function(e){var t,r=e.popToken(),a="";if("'"===r.text)t=8,r=e.popToken();else if('"'===r.text)t=16,r=e.popToken();else if("`"===r.text)if("\\"===(r=e.popToken()).text[0])a=r.text.charCodeAt(1);else{if("EOF"===r.text)throw new n("\\char` missing argument");a=r.text.charCodeAt(0)}else t=10;if(t){if(null==(a=En[r.text])||a>=t)throw new n("Invalid base-"+t+" digit "+r.text);for(var i;null!=(i=En[e.future().text])&&i":"\\dotsb","-":"\\dotsb","*":"\\dotsb",":":"\\dotsb","\\DOTSB":"\\dotsb","\\coprod":"\\dotsb","\\bigvee":"\\dotsb","\\bigwedge":"\\dotsb","\\biguplus":"\\dotsb","\\bigcap":"\\dotsb","\\bigcup":"\\dotsb","\\prod":"\\dotsb","\\sum":"\\dotsb","\\bigotimes":"\\dotsb","\\bigoplus":"\\dotsb","\\bigodot":"\\dotsb","\\bigsqcup":"\\dotsb","\\And":"\\dotsb","\\longrightarrow":"\\dotsb","\\Longrightarrow":"\\dotsb","\\longleftarrow":"\\dotsb","\\Longleftarrow":"\\dotsb","\\longleftrightarrow":"\\dotsb","\\Longleftrightarrow":"\\dotsb","\\mapsto":"\\dotsb","\\longmapsto":"\\dotsb","\\hookrightarrow":"\\dotsb","\\doteq":"\\dotsb","\\mathbin":"\\dotsb","\\mathrel":"\\dotsb","\\relbar":"\\dotsb","\\Relbar":"\\dotsb","\\xrightarrow":"\\dotsb","\\xleftarrow":"\\dotsb","\\DOTSI":"\\dotsi","\\int":"\\dotsi","\\oint":"\\dotsi","\\iint":"\\dotsi","\\iiint":"\\dotsi","\\iiiint":"\\dotsi","\\idotsint":"\\dotsi","\\DOTSX":"\\dotsx"};Er("\\dots",(function(e){var t="\\dotso",r=e.expandAfterFuture().text;return r in Dn?t=Dn[r]:("\\not"===r.substr(0,4)||r in ae.math&&l.contains(["bin","rel"],ae.math[r].group))&&(t="\\dotsb"),t}));var Pn={")":!0,"]":!0,"\\rbrack":!0,"\\}":!0,"\\rbrace":!0,"\\rangle":!0,"\\rceil":!0,"\\rfloor":!0,"\\rgroup":!0,"\\rmoustache":!0,"\\right":!0,"\\bigr":!0,"\\biggr":!0,"\\Bigr":!0,"\\Biggr":!0,$:!0,";":!0,".":!0,",":!0};Er("\\dotso",(function(e){return e.future().text in Pn?"\\ldots\\,":"\\ldots"})),Er("\\dotsc",(function(e){var t=e.future().text;return t in Pn&&","!==t?"\\ldots\\,":"\\ldots"})),Er("\\cdots",(function(e){return e.future().text in 
Pn?"\\@cdots\\,":"\\@cdots"})),Er("\\dotsb","\\cdots"),Er("\\dotsm","\\cdots"),Er("\\dotsi","\\!\\cdots"),Er("\\dotsx","\\ldots\\,"),Er("\\DOTSI","\\relax"),Er("\\DOTSB","\\relax"),Er("\\DOTSX","\\relax"),Er("\\tmspace","\\TextOrMath{\\kern#1#3}{\\mskip#1#2}\\relax"),Er("\\,","\\tmspace+{3mu}{.1667em}"),Er("\\thinspace","\\,"),Er("\\>","\\mskip{4mu}"),Er("\\:","\\tmspace+{4mu}{.2222em}"),Er("\\medspace","\\:"),Er("\\;","\\tmspace+{5mu}{.2777em}"),Er("\\thickspace","\\;"),Er("\\!","\\tmspace-{3mu}{.1667em}"),Er("\\negthinspace","\\!"),Er("\\negmedspace","\\tmspace-{4mu}{.2222em}"),Er("\\negthickspace","\\tmspace-{5mu}{.277em}"),Er("\\enspace","\\kern.5em "),Er("\\enskip","\\hskip.5em\\relax"),Er("\\quad","\\hskip1em\\relax"),Er("\\qquad","\\hskip2em\\relax"),Er("\\tag","\\@ifstar\\tag@literal\\tag@paren"),Er("\\tag@paren","\\tag@literal{({#1})}"),Er("\\tag@literal",(function(e){if(e.macros.get("\\df@tag"))throw new n("Multiple \\tag");return"\\gdef\\df@tag{\\text{#1}}"})),Er("\\bmod","\\mathchoice{\\mskip1mu}{\\mskip1mu}{\\mskip5mu}{\\mskip5mu}\\mathbin{\\rm mod}\\mathchoice{\\mskip1mu}{\\mskip1mu}{\\mskip5mu}{\\mskip5mu}"),Er("\\pod","\\allowbreak\\mathchoice{\\mkern18mu}{\\mkern8mu}{\\mkern8mu}{\\mkern8mu}(#1)"),Er("\\pmod","\\pod{{\\rm mod}\\mkern6mu#1}"),Er("\\mod","\\allowbreak\\mathchoice{\\mkern18mu}{\\mkern12mu}{\\mkern12mu}{\\mkern12mu}{\\rm mod}\\,\\,#1"),Er("\\pmb","\\html@mathml{\\@binrel{#1}{\\mathrlap{#1}\\kern0.5px#1}}{\\mathbf{#1}}"),Er("\\newline","\\\\\\relax"),Er("\\TeX","\\textrm{\\html@mathml{T\\kern-.1667em\\raisebox{-.5ex}{E}\\kern-.125emX}{TeX}}");var Fn=V(T["Main-Regular"]["T".charCodeAt(0)][1]-.7*T["Main-Regular"]["A".charCodeAt(0)][1]);Er("\\LaTeX","\\textrm{\\html@mathml{L\\kern-.36em\\raisebox{"+Fn+"}{\\scriptstyle A}\\kern-.15em\\TeX}{LaTeX}}"),Er("\\KaTeX","\\textrm{\\html@mathml{K\\kern-.17em\\raisebox{"+Fn+"}{\\scriptstyle A}\\kern-.15em\\TeX}{KaTeX}}"),Er("\\hspace","\\@ifstar\\@hspacer\\@hspace"),Er("\\@hspace","\\hskip #1\\relax"),Er("\\@hspacer","\\rule{0pt}{0pt}\\hskip 
#1\\relax"),Er("\\ordinarycolon",":"),Er("\\vcentcolon","\\mathrel{\\mathop\\ordinarycolon}"),Er("\\dblcolon",'\\html@mathml{\\mathrel{\\vcentcolon\\mathrel{\\mkern-.9mu}\\vcentcolon}}{\\mathop{\\char"2237}}'),Er("\\coloneqq",'\\html@mathml{\\mathrel{\\vcentcolon\\mathrel{\\mkern-1.2mu}=}}{\\mathop{\\char"2254}}'),Er("\\Coloneqq",'\\html@mathml{\\mathrel{\\dblcolon\\mathrel{\\mkern-1.2mu}=}}{\\mathop{\\char"2237\\char"3d}}'),Er("\\coloneq",'\\html@mathml{\\mathrel{\\vcentcolon\\mathrel{\\mkern-1.2mu}\\mathrel{-}}}{\\mathop{\\char"3a\\char"2212}}'),Er("\\Coloneq",'\\html@mathml{\\mathrel{\\dblcolon\\mathrel{\\mkern-1.2mu}\\mathrel{-}}}{\\mathop{\\char"2237\\char"2212}}'),Er("\\eqqcolon",'\\html@mathml{\\mathrel{=\\mathrel{\\mkern-1.2mu}\\vcentcolon}}{\\mathop{\\char"2255}}'),Er("\\Eqqcolon",'\\html@mathml{\\mathrel{=\\mathrel{\\mkern-1.2mu}\\dblcolon}}{\\mathop{\\char"3d\\char"2237}}'),Er("\\eqcolon",'\\html@mathml{\\mathrel{\\mathrel{-}\\mathrel{\\mkern-1.2mu}\\vcentcolon}}{\\mathop{\\char"2239}}'),Er("\\Eqcolon",'\\html@mathml{\\mathrel{\\mathrel{-}\\mathrel{\\mkern-1.2mu}\\dblcolon}}{\\mathop{\\char"2212\\char"2237}}'),Er("\\colonapprox",'\\html@mathml{\\mathrel{\\vcentcolon\\mathrel{\\mkern-1.2mu}\\approx}}{\\mathop{\\char"3a\\char"2248}}'),Er("\\Colonapprox",'\\html@mathml{\\mathrel{\\dblcolon\\mathrel{\\mkern-1.2mu}\\approx}}{\\mathop{\\char"2237\\char"2248}}'),Er("\\colonsim",'\\html@mathml{\\mathrel{\\vcentcolon\\mathrel{\\mkern-1.2mu}\\sim}}{\\mathop{\\char"3a\\char"223c}}'),Er("\\Colonsim",'\\html@mathml{\\mathrel{\\dblcolon\\mathrel{\\mkern-1.2mu}\\sim}}{\\mathop{\\char"2237\\char"223c}}'),Er("\u2237","\\dblcolon"),Er("\u2239","\\eqcolon"),Er("\u2254","\\coloneqq"),Er("\u2255","\\eqqcolon"),Er("\u2a74","\\Coloneqq"),Er("\\ratio","\\vcentcolon"),Er("\\coloncolon","\\dblcolon"),Er("\\colonequals","\\coloneqq"),Er("\\coloncolonequals","\\Coloneqq"),Er("\\equalscolon","\\eqqcolon"),Er("\\equalscoloncolon","\\Eqqcolon"),Er("\\colonminus","\\coloneq"),Er("\\coloncolonminus","\\Coloneq"),Er("\\minuscolon","\\eqcolon"),Er("\\minuscoloncolon","\\Eqcolon"),Er("\\coloncolonapprox","\\Colonapprox"),Er("\\coloncolonsim","\\Colonsim"),Er("\\simcolon","\\mathrel{\\sim\\mathrel{\\mkern-1.2mu}\\vcentcolon}"),Er("\\simcoloncolon","\\mathrel{\\sim\\mathrel{\\mkern-1.2mu}\\dblcolon}"),Er("\\approxcolon","\\mathrel{\\approx\\mathrel{\\mkern-1.2mu}\\vcentcolon}"),Er("\\approxcoloncolon","\\mathrel{\\approx\\mathrel{\\mkern-1.2mu}\\dblcolon}"),Er("\\notni","\\html@mathml{\\not\\ni}{\\mathrel{\\char`\u220c}}"),Er("\\limsup","\\DOTSB\\operatorname*{lim\\,sup}"),Er("\\liminf","\\DOTSB\\operatorname*{lim\\,inf}"),Er("\\injlim","\\DOTSB\\operatorname*{inj\\,lim}"),Er("\\projlim","\\DOTSB\\operatorname*{proj\\,lim}"),Er("\\varlimsup","\\DOTSB\\operatorname*{\\overline{lim}}"),Er("\\varliminf","\\DOTSB\\operatorname*{\\underline{lim}}"),Er("\\varinjlim","\\DOTSB\\operatorname*{\\underrightarrow{lim}}"),Er("\\varprojlim","\\DOTSB\\operatorname*{\\underleftarrow{lim}}"),Er("\\gvertneqq","\\html@mathml{\\@gvertneqq}{\u2269}"),Er("\\lvertneqq","\\html@mathml{\\@lvertneqq}{\u2268}"),Er("\\ngeqq","\\html@mathml{\\@ngeqq}{\u2271}"),Er("\\ngeqslant","\\html@mathml{\\@ngeqslant}{\u2271}"),Er("\\nleqq","\\html@mathml{\\@nleqq}{\u2270}"),Er("\\nleqslant","\\html@mathml{\\@nleqslant}{\u2270}"),Er("\\nshortmid","\\html@mathml{\\@nshortmid}{\u2224}"),Er("\\nshortparallel","\\html@mathml{\\@nshortparallel}{\u2226}"),Er("\\nsubseteqq","\\html@mathml{\\@nsubseteqq}{\u2288}"),Er("\\nsupseteqq","\\html@mathml{\\@nsupseteqq}{\u22
89}"),Er("\\varsubsetneq","\\html@mathml{\\@varsubsetneq}{\u228a}"),Er("\\varsubsetneqq","\\html@mathml{\\@varsubsetneqq}{\u2acb}"),Er("\\varsupsetneq","\\html@mathml{\\@varsupsetneq}{\u228b}"),Er("\\varsupsetneqq","\\html@mathml{\\@varsupsetneqq}{\u2acc}"),Er("\\imath","\\html@mathml{\\@imath}{\u0131}"),Er("\\jmath","\\html@mathml{\\@jmath}{\u0237}"),Er("\\llbracket","\\html@mathml{\\mathopen{[\\mkern-3.2mu[}}{\\mathopen{\\char`\u27e6}}"),Er("\\rrbracket","\\html@mathml{\\mathclose{]\\mkern-3.2mu]}}{\\mathclose{\\char`\u27e7}}"),Er("\u27e6","\\llbracket"),Er("\u27e7","\\rrbracket"),Er("\\lBrace","\\html@mathml{\\mathopen{\\{\\mkern-3.2mu[}}{\\mathopen{\\char`\u2983}}"),Er("\\rBrace","\\html@mathml{\\mathclose{]\\mkern-3.2mu\\}}}{\\mathclose{\\char`\u2984}}"),Er("\u2983","\\lBrace"),Er("\u2984","\\rBrace"),Er("\\minuso","\\mathbin{\\html@mathml{{\\mathrlap{\\mathchoice{\\kern{0.145em}}{\\kern{0.145em}}{\\kern{0.1015em}}{\\kern{0.0725em}}\\circ}{-}}}{\\char`\u29b5}}"),Er("\u29b5","\\minuso"),Er("\\darr","\\downarrow"),Er("\\dArr","\\Downarrow"),Er("\\Darr","\\Downarrow"),Er("\\lang","\\langle"),Er("\\rang","\\rangle"),Er("\\uarr","\\uparrow"),Er("\\uArr","\\Uparrow"),Er("\\Uarr","\\Uparrow"),Er("\\N","\\mathbb{N}"),Er("\\R","\\mathbb{R}"),Er("\\Z","\\mathbb{Z}"),Er("\\alef","\\aleph"),Er("\\alefsym","\\aleph"),Er("\\Alpha","\\mathrm{A}"),Er("\\Beta","\\mathrm{B}"),Er("\\bull","\\bullet"),Er("\\Chi","\\mathrm{X}"),Er("\\clubs","\\clubsuit"),Er("\\cnums","\\mathbb{C}"),Er("\\Complex","\\mathbb{C}"),Er("\\Dagger","\\ddagger"),Er("\\diamonds","\\diamondsuit"),Er("\\empty","\\emptyset"),Er("\\Epsilon","\\mathrm{E}"),Er("\\Eta","\\mathrm{H}"),Er("\\exist","\\exists"),Er("\\harr","\\leftrightarrow"),Er("\\hArr","\\Leftrightarrow"),Er("\\Harr","\\Leftrightarrow"),Er("\\hearts","\\heartsuit"),Er("\\image","\\Im"),Er("\\infin","\\infty"),Er("\\Iota","\\mathrm{I}"),Er("\\isin","\\in"),Er("\\Kappa","\\mathrm{K}"),Er("\\larr","\\leftarrow"),Er("\\lArr","\\Leftarrow"),Er("\\Larr","\\Leftarrow"),Er("\\lrarr","\\leftrightarrow"),Er("\\lrArr","\\Leftrightarrow"),Er("\\Lrarr","\\Leftrightarrow"),Er("\\Mu","\\mathrm{M}"),Er("\\natnums","\\mathbb{N}"),Er("\\Nu","\\mathrm{N}"),Er("\\Omicron","\\mathrm{O}"),Er("\\plusmn","\\pm"),Er("\\rarr","\\rightarrow"),Er("\\rArr","\\Rightarrow"),Er("\\Rarr","\\Rightarrow"),Er("\\real","\\Re"),Er("\\reals","\\mathbb{R}"),Er("\\Reals","\\mathbb{R}"),Er("\\Rho","\\mathrm{P}"),Er("\\sdot","\\cdot"),Er("\\sect","\\S"),Er("\\spades","\\spadesuit"),Er("\\sub","\\subset"),Er("\\sube","\\subseteq"),Er("\\supe","\\supseteq"),Er("\\Tau","\\mathrm{T}"),Er("\\thetasym","\\vartheta"),Er("\\weierp","\\wp"),Er("\\Zeta","\\mathrm{Z}"),Er("\\argmin","\\DOTSB\\operatorname*{arg\\,min}"),Er("\\argmax","\\DOTSB\\operatorname*{arg\\,max}"),Er("\\plim","\\DOTSB\\mathop{\\operatorname{plim}}\\limits"),Er("\\bra","\\mathinner{\\langle{#1}|}"),Er("\\ket","\\mathinner{|{#1}\\rangle}"),Er("\\braket","\\mathinner{\\langle{#1}\\rangle}"),Er("\\Bra","\\left\\langle#1\\right|"),Er("\\Ket","\\left|#1\\right\\rangle"),Er("\\angln","{\\angl 
n}"),Er("\\blue","\\textcolor{##6495ed}{#1}"),Er("\\orange","\\textcolor{##ffa500}{#1}"),Er("\\pink","\\textcolor{##ff00af}{#1}"),Er("\\red","\\textcolor{##df0030}{#1}"),Er("\\green","\\textcolor{##28ae7b}{#1}"),Er("\\gray","\\textcolor{gray}{#1}"),Er("\\purple","\\textcolor{##9d38bd}{#1}"),Er("\\blueA","\\textcolor{##ccfaff}{#1}"),Er("\\blueB","\\textcolor{##80f6ff}{#1}"),Er("\\blueC","\\textcolor{##63d9ea}{#1}"),Er("\\blueD","\\textcolor{##11accd}{#1}"),Er("\\blueE","\\textcolor{##0c7f99}{#1}"),Er("\\tealA","\\textcolor{##94fff5}{#1}"),Er("\\tealB","\\textcolor{##26edd5}{#1}"),Er("\\tealC","\\textcolor{##01d1c1}{#1}"),Er("\\tealD","\\textcolor{##01a995}{#1}"),Er("\\tealE","\\textcolor{##208170}{#1}"),Er("\\greenA","\\textcolor{##b6ffb0}{#1}"),Er("\\greenB","\\textcolor{##8af281}{#1}"),Er("\\greenC","\\textcolor{##74cf70}{#1}"),Er("\\greenD","\\textcolor{##1fab54}{#1}"),Er("\\greenE","\\textcolor{##0d923f}{#1}"),Er("\\goldA","\\textcolor{##ffd0a9}{#1}"),Er("\\goldB","\\textcolor{##ffbb71}{#1}"),Er("\\goldC","\\textcolor{##ff9c39}{#1}"),Er("\\goldD","\\textcolor{##e07d10}{#1}"),Er("\\goldE","\\textcolor{##a75a05}{#1}"),Er("\\redA","\\textcolor{##fca9a9}{#1}"),Er("\\redB","\\textcolor{##ff8482}{#1}"),Er("\\redC","\\textcolor{##f9685d}{#1}"),Er("\\redD","\\textcolor{##e84d39}{#1}"),Er("\\redE","\\textcolor{##bc2612}{#1}"),Er("\\maroonA","\\textcolor{##ffbde0}{#1}"),Er("\\maroonB","\\textcolor{##ff92c6}{#1}"),Er("\\maroonC","\\textcolor{##ed5fa6}{#1}"),Er("\\maroonD","\\textcolor{##ca337c}{#1}"),Er("\\maroonE","\\textcolor{##9e034e}{#1}"),Er("\\purpleA","\\textcolor{##ddd7ff}{#1}"),Er("\\purpleB","\\textcolor{##c6b9fc}{#1}"),Er("\\purpleC","\\textcolor{##aa87ff}{#1}"),Er("\\purpleD","\\textcolor{##7854ab}{#1}"),Er("\\purpleE","\\textcolor{##543b78}{#1}"),Er("\\mintA","\\textcolor{##f5f9e8}{#1}"),Er("\\mintB","\\textcolor{##edf2df}{#1}"),Er("\\mintC","\\textcolor{##e0e5cc}{#1}"),Er("\\grayA","\\textcolor{##f6f7f7}{#1}"),Er("\\grayB","\\textcolor{##f0f1f2}{#1}"),Er("\\grayC","\\textcolor{##e3e5e6}{#1}"),Er("\\grayD","\\textcolor{##d6d8da}{#1}"),Er("\\grayE","\\textcolor{##babec2}{#1}"),Er("\\grayF","\\textcolor{##888d93}{#1}"),Er("\\grayG","\\textcolor{##626569}{#1}"),Er("\\grayH","\\textcolor{##3b3e40}{#1}"),Er("\\grayI","\\textcolor{##21242c}{#1}"),Er("\\kaBlue","\\textcolor{##314453}{#1}"),Er("\\kaGreen","\\textcolor{##71B307}{#1}");var Vn={"^":!0,_:!0,"\\limits":!0,"\\nolimits":!0},Gn=function(){function e(e,t,r){this.settings=void 0,this.expansionCount=void 0,this.lexer=void 0,this.macros=void 0,this.stack=void 0,this.mode=void 0,this.settings=t,this.expansionCount=0,this.feed(e),this.macros=new On(Hn,t.macros),this.mode=r,this.stack=[]}var t=e.prototype;return t.feed=function(e){this.lexer=new Rn(e,this.settings)},t.switchMode=function(e){this.mode=e},t.beginGroup=function(){this.macros.beginGroup()},t.endGroup=function(){this.macros.endGroup()},t.endGroups=function(){this.macros.endGroups()},t.future=function(){return 0===this.stack.length&&this.pushToken(this.lexer.lex()),this.stack[this.stack.length-1]},t.popToken=function(){return this.future(),this.stack.pop()},t.pushToken=function(e){this.stack.push(e)},t.pushTokens=function(e){var t;(t=this.stack).push.apply(t,e)},t.scanArgument=function(e){var t,r,n;if(e){if(this.consumeSpaces(),"["!==this.future().text)return null;t=this.popToken();var a=this.consumeArg(["]"]);n=a.tokens,r=a.end}else{var i=this.consumeArg();n=i.tokens,t=i.start,r=i.end}return this.pushToken(new 
Dr("EOF",r.loc)),this.pushTokens(n),t.range(r,"")},t.consumeSpaces=function(){for(;;){if(" "!==this.future().text)break;this.stack.pop()}},t.consumeArg=function(e){var t=[],r=e&&e.length>0;r||this.consumeSpaces();var a,i=this.future(),o=0,s=0;do{if(a=this.popToken(),t.push(a),"{"===a.text)++o;else if("}"===a.text){if(-1===--o)throw new n("Extra }",a)}else if("EOF"===a.text)throw new n("Unexpected end of input in a macro argument, expected '"+(e&&r?e[s]:"}")+"'",a);if(e&&r)if((0===o||1===o&&"{"===e[s])&&a.text===e[s]){if(++s===e.length){t.splice(-s,s);break}}else s=0}while(0!==o||r);return"{"===i.text&&"}"===t[t.length-1].text&&(t.pop(),t.shift()),t.reverse(),{tokens:t,start:i,end:a}},t.consumeArgs=function(e,t){if(t){if(t.length!==e+1)throw new n("The length of delimiters doesn't match the number of args!");for(var r=t[0],a=0;athis.settings.maxExpand)throw new n("Too many expansions: infinite loop or need to increase maxExpand setting");var i=a.tokens,o=this.consumeArgs(a.numArgs,a.delimiters);if(a.numArgs)for(var s=(i=i.slice()).length-1;s>=0;--s){var l=i[s];if("#"===l.text){if(0===s)throw new n("Incomplete placeholder at end of macro body",l);if("#"===(l=i[--s]).text)i.splice(s+1,1);else{if(!/^[1-9]$/.test(l.text))throw new n("Not a valid argument number",l);var h;(h=i).splice.apply(h,[s,2].concat(o[+l.text-1]))}}}return this.pushTokens(i),i},t.expandAfterFuture=function(){return this.expandOnce(),this.future()},t.expandNextToken=function(){for(;;){var e=this.expandOnce();if(e instanceof Dr)return e.treatAsRelax&&(e.text="\\relax"),this.stack.pop()}throw new Error},t.expandMacro=function(e){return this.macros.has(e)?this.expandTokens([new Dr(e)]):void 0},t.expandTokens=function(e){var t=[],r=this.stack.length;for(this.pushTokens(e);this.stack.length>r;){var n=this.expandOnce(!0);n instanceof Dr&&(n.treatAsRelax&&(n.noexpand=!1,n.treatAsRelax=!1),t.push(this.stack.pop()))}return t},t.expandMacroAsText=function(e){var t=this.expandMacro(e);return t?t.map((function(e){return e.text})).join(""):t},t._getExpansion=function(e){var t=this.macros.get(e);if(null==t)return t;if(1===e.length){var r=this.lexer.catcodes[e];if(null!=r&&13!==r)return}var n="function"==typeof t?t(this):t;if("string"==typeof n){var a=0;if(-1!==n.indexOf("#"))for(var i=n.replace(/##/g,"");-1!==i.indexOf("#"+(a+1));)++a;for(var o=new Rn(n,this.settings),s=[],l=o.lex();"EOF"!==l.text;)s.push(l),l=o.lex();return s.reverse(),{tokens:s,numArgs:a}}return n},t.isDefined=function(e){return this.macros.has(e)||Nn.hasOwnProperty(e)||ae.math.hasOwnProperty(e)||ae.text.hasOwnProperty(e)||Vn.hasOwnProperty(e)},t.isExpandable=function(e){var t=this.macros.get(e);return null!=t?"string"==typeof t||"function"==typeof 
t||!t.unexpandable:Nn.hasOwnProperty(e)&&!Nn[e].primitive},e}(),Un={"\u0301":{text:"\\'",math:"\\acute"},"\u0300":{text:"\\`",math:"\\grave"},"\u0308":{text:'\\"',math:"\\ddot"},"\u0303":{text:"\\~",math:"\\tilde"},"\u0304":{text:"\\=",math:"\\bar"},"\u0306":{text:"\\u",math:"\\breve"},"\u030c":{text:"\\v",math:"\\check"},"\u0302":{text:"\\^",math:"\\hat"},"\u0307":{text:"\\.",math:"\\dot"},"\u030a":{text:"\\r",math:"\\mathring"},"\u030b":{text:"\\H"},"\u0327":{text:"\\c"}},Yn={"\xe1":"a\u0301","\xe0":"a\u0300","\xe4":"a\u0308","\u01df":"a\u0308\u0304","\xe3":"a\u0303","\u0101":"a\u0304","\u0103":"a\u0306","\u1eaf":"a\u0306\u0301","\u1eb1":"a\u0306\u0300","\u1eb5":"a\u0306\u0303","\u01ce":"a\u030c","\xe2":"a\u0302","\u1ea5":"a\u0302\u0301","\u1ea7":"a\u0302\u0300","\u1eab":"a\u0302\u0303","\u0227":"a\u0307","\u01e1":"a\u0307\u0304","\xe5":"a\u030a","\u01fb":"a\u030a\u0301","\u1e03":"b\u0307","\u0107":"c\u0301","\u1e09":"c\u0327\u0301","\u010d":"c\u030c","\u0109":"c\u0302","\u010b":"c\u0307","\xe7":"c\u0327","\u010f":"d\u030c","\u1e0b":"d\u0307","\u1e11":"d\u0327","\xe9":"e\u0301","\xe8":"e\u0300","\xeb":"e\u0308","\u1ebd":"e\u0303","\u0113":"e\u0304","\u1e17":"e\u0304\u0301","\u1e15":"e\u0304\u0300","\u0115":"e\u0306","\u1e1d":"e\u0327\u0306","\u011b":"e\u030c","\xea":"e\u0302","\u1ebf":"e\u0302\u0301","\u1ec1":"e\u0302\u0300","\u1ec5":"e\u0302\u0303","\u0117":"e\u0307","\u0229":"e\u0327","\u1e1f":"f\u0307","\u01f5":"g\u0301","\u1e21":"g\u0304","\u011f":"g\u0306","\u01e7":"g\u030c","\u011d":"g\u0302","\u0121":"g\u0307","\u0123":"g\u0327","\u1e27":"h\u0308","\u021f":"h\u030c","\u0125":"h\u0302","\u1e23":"h\u0307","\u1e29":"h\u0327","\xed":"i\u0301","\xec":"i\u0300","\xef":"i\u0308","\u1e2f":"i\u0308\u0301","\u0129":"i\u0303","\u012b":"i\u0304","\u012d":"i\u0306","\u01d0":"i\u030c","\xee":"i\u0302","\u01f0":"j\u030c","\u0135":"j\u0302","\u1e31":"k\u0301","\u01e9":"k\u030c","\u0137":"k\u0327","\u013a":"l\u0301","\u013e":"l\u030c","\u013c":"l\u0327","\u1e3f":"m\u0301","\u1e41":"m\u0307","\u0144":"n\u0301","\u01f9":"n\u0300","\xf1":"n\u0303","\u0148":"n\u030c","\u1e45":"n\u0307","\u0146":"n\u0327","\xf3":"o\u0301","\xf2":"o\u0300","\xf6":"o\u0308","\u022b":"o\u0308\u0304","\xf5":"o\u0303","\u1e4d":"o\u0303\u0301","\u1e4f":"o\u0303\u0308","\u022d":"o\u0303\u0304","\u014d":"o\u0304","\u1e53":"o\u0304\u0301","\u1e51":"o\u0304\u0300","\u014f":"o\u0306","\u01d2":"o\u030c","\xf4":"o\u0302","\u1ed1":"o\u0302\u0301","\u1ed3":"o\u0302\u0300","\u1ed7":"o\u0302\u0303","\u022f":"o\u0307","\u0231":"o\u0307\u0304","\u0151":"o\u030b","\u1e55":"p\u0301","\u1e57":"p\u0307","\u0155":"r\u0301","\u0159":"r\u030c","\u1e59":"r\u0307","\u0157":"r\u0327","\u015b":"s\u0301","\u1e65":"s\u0301\u0307","\u0161":"s\u030c","\u1e67":"s\u030c\u0307","\u015d":"s\u0302","\u1e61":"s\u0307","\u015f":"s\u0327","\u1e97":"t\u0308","\u0165":"t\u030c","\u1e6b":"t\u0307","\u0163":"t\u0327","\xfa":"u\u0301","\xf9":"u\u0300","\xfc":"u\u0308","\u01d8":"u\u0308\u0301","\u01dc":"u\u0308\u0300","\u01d6":"u\u0308\u0304","\u01da":"u\u0308\u030c","\u0169":"u\u0303","\u1e79":"u\u0303\u0301","\u016b":"u\u0304","\u1e7b":"u\u0304\u0308","\u016d":"u\u0306","\u01d4":"u\u030c","\xfb":"u\u0302","\u016f":"u\u030a","\u0171":"u\u030b","\u1e7d":"v\u0303","\u1e83":"w\u0301","\u1e81":"w\u0300","\u1e85":"w\u0308","\u0175":"w\u0302","\u1e87":"w\u0307","\u1e98":"w\u030a","\u1e8d":"x\u0308","\u1e8b":"x\u0307","\xfd":"y\u0301","\u1ef3":"y\u0300","\xff":"y\u0308","\u1ef9":"y\u0303","\u0233":"y\u0304","\u0177":"y\u0302","\u1e8f":"y\u0307","\u1e99":"y\u030a","\u017a":
"z\u0301","\u017e":"z\u030c","\u1e91":"z\u0302","\u017c":"z\u0307","\xc1":"A\u0301","\xc0":"A\u0300","\xc4":"A\u0308","\u01de":"A\u0308\u0304","\xc3":"A\u0303","\u0100":"A\u0304","\u0102":"A\u0306","\u1eae":"A\u0306\u0301","\u1eb0":"A\u0306\u0300","\u1eb4":"A\u0306\u0303","\u01cd":"A\u030c","\xc2":"A\u0302","\u1ea4":"A\u0302\u0301","\u1ea6":"A\u0302\u0300","\u1eaa":"A\u0302\u0303","\u0226":"A\u0307","\u01e0":"A\u0307\u0304","\xc5":"A\u030a","\u01fa":"A\u030a\u0301","\u1e02":"B\u0307","\u0106":"C\u0301","\u1e08":"C\u0327\u0301","\u010c":"C\u030c","\u0108":"C\u0302","\u010a":"C\u0307","\xc7":"C\u0327","\u010e":"D\u030c","\u1e0a":"D\u0307","\u1e10":"D\u0327","\xc9":"E\u0301","\xc8":"E\u0300","\xcb":"E\u0308","\u1ebc":"E\u0303","\u0112":"E\u0304","\u1e16":"E\u0304\u0301","\u1e14":"E\u0304\u0300","\u0114":"E\u0306","\u1e1c":"E\u0327\u0306","\u011a":"E\u030c","\xca":"E\u0302","\u1ebe":"E\u0302\u0301","\u1ec0":"E\u0302\u0300","\u1ec4":"E\u0302\u0303","\u0116":"E\u0307","\u0228":"E\u0327","\u1e1e":"F\u0307","\u01f4":"G\u0301","\u1e20":"G\u0304","\u011e":"G\u0306","\u01e6":"G\u030c","\u011c":"G\u0302","\u0120":"G\u0307","\u0122":"G\u0327","\u1e26":"H\u0308","\u021e":"H\u030c","\u0124":"H\u0302","\u1e22":"H\u0307","\u1e28":"H\u0327","\xcd":"I\u0301","\xcc":"I\u0300","\xcf":"I\u0308","\u1e2e":"I\u0308\u0301","\u0128":"I\u0303","\u012a":"I\u0304","\u012c":"I\u0306","\u01cf":"I\u030c","\xce":"I\u0302","\u0130":"I\u0307","\u0134":"J\u0302","\u1e30":"K\u0301","\u01e8":"K\u030c","\u0136":"K\u0327","\u0139":"L\u0301","\u013d":"L\u030c","\u013b":"L\u0327","\u1e3e":"M\u0301","\u1e40":"M\u0307","\u0143":"N\u0301","\u01f8":"N\u0300","\xd1":"N\u0303","\u0147":"N\u030c","\u1e44":"N\u0307","\u0145":"N\u0327","\xd3":"O\u0301","\xd2":"O\u0300","\xd6":"O\u0308","\u022a":"O\u0308\u0304","\xd5":"O\u0303","\u1e4c":"O\u0303\u0301","\u1e4e":"O\u0303\u0308","\u022c":"O\u0303\u0304","\u014c":"O\u0304","\u1e52":"O\u0304\u0301","\u1e50":"O\u0304\u0300","\u014e":"O\u0306","\u01d1":"O\u030c","\xd4":"O\u0302","\u1ed0":"O\u0302\u0301","\u1ed2":"O\u0302\u0300","\u1ed6":"O\u0302\u0303","\u022e":"O\u0307","\u0230":"O\u0307\u0304","\u0150":"O\u030b","\u1e54":"P\u0301","\u1e56":"P\u0307","\u0154":"R\u0301","\u0158":"R\u030c","\u1e58":"R\u0307","\u0156":"R\u0327","\u015a":"S\u0301","\u1e64":"S\u0301\u0307","\u0160":"S\u030c","\u1e66":"S\u030c\u0307","\u015c":"S\u0302","\u1e60":"S\u0307","\u015e":"S\u0327","\u0164":"T\u030c","\u1e6a":"T\u0307","\u0162":"T\u0327","\xda":"U\u0301","\xd9":"U\u0300","\xdc":"U\u0308","\u01d7":"U\u0308\u0301","\u01db":"U\u0308\u0300","\u01d5":"U\u0308\u0304","\u01d9":"U\u0308\u030c","\u0168":"U\u0303","\u1e78":"U\u0303\u0301","\u016a":"U\u0304","\u1e7a":"U\u0304\u0308","\u016c":"U\u0306","\u01d3":"U\u030c","\xdb":"U\u0302","\u016e":"U\u030a","\u0170":"U\u030b","\u1e7c":"V\u0303","\u1e82":"W\u0301","\u1e80":"W\u0300","\u1e84":"W\u0308","\u0174":"W\u0302","\u1e86":"W\u0307","\u1e8c":"X\u0308","\u1e8a":"X\u0307","\xdd":"Y\u0301","\u1ef2":"Y\u0300","\u0178":"Y\u0308","\u1ef8":"Y\u0303","\u0232":"Y\u0304","\u0176":"Y\u0302","\u1e8e":"Y\u0307","\u0179":"Z\u0301","\u017d":"Z\u030c","\u1e90":"Z\u0302","\u017b":"Z\u0307","\u03ac":"\u03b1\u0301","\u1f70":"\u03b1\u0300","\u1fb1":"\u03b1\u0304","\u1fb0":"\u03b1\u0306","\u03ad":"\u03b5\u0301","\u1f72":"\u03b5\u0300","\u03ae":"\u03b7\u0301","\u1f74":"\u03b7\u0300","\u03af":"\u03b9\u0301","\u1f76":"\u03b9\u0300","\u03ca":"\u03b9\u0308","\u0390":"\u03b9\u0308\u0301","\u1fd2":"\u03b9\u0308\u0300","\u1fd1":"\u03b9\u0304","\u1fd0":"\u03b9\u0306","\u03cc":"\u03bf\u0301","\u1f78"
:"\u03bf\u0300","\u03cd":"\u03c5\u0301","\u1f7a":"\u03c5\u0300","\u03cb":"\u03c5\u0308","\u03b0":"\u03c5\u0308\u0301","\u1fe2":"\u03c5\u0308\u0300","\u1fe1":"\u03c5\u0304","\u1fe0":"\u03c5\u0306","\u03ce":"\u03c9\u0301","\u1f7c":"\u03c9\u0300","\u038e":"\u03a5\u0301","\u1fea":"\u03a5\u0300","\u03ab":"\u03a5\u0308","\u1fe9":"\u03a5\u0304","\u1fe8":"\u03a5\u0306","\u038f":"\u03a9\u0301","\u1ffa":"\u03a9\u0300"},Xn=function(){function e(e,t){this.mode=void 0,this.gullet=void 0,this.settings=void 0,this.leftrightDepth=void 0,this.nextToken=void 0,this.mode="math",this.gullet=new Gn(e,t,this.mode),this.settings=t,this.leftrightDepth=0}var t=e.prototype;return t.expect=function(e,t){if(void 0===t&&(t=!0),this.fetch().text!==e)throw new n("Expected '"+e+"', got '"+this.fetch().text+"'",this.fetch());t&&this.consume()},t.consume=function(){this.nextToken=null},t.fetch=function(){return null==this.nextToken&&(this.nextToken=this.gullet.expandNextToken()),this.nextToken},t.switchMode=function(e){this.mode=e,this.gullet.switchMode(e)},t.parse=function(){this.settings.globalGroup||this.gullet.beginGroup(),this.settings.colorIsTextColor&&this.gullet.macros.set("\\color","\\textcolor");try{var e=this.parseExpression(!1);return this.expect("EOF"),this.settings.globalGroup||this.gullet.endGroup(),e}finally{this.gullet.endGroups()}},t.subparse=function(e){var t=this.nextToken;this.consume(),this.gullet.pushToken(new Dr("}")),this.gullet.pushTokens(e);var r=this.parseExpression(!1);return this.expect("}"),this.nextToken=t,r},t.parseExpression=function(t,r){for(var n=[];;){"math"===this.mode&&this.consumeSpaces();var a=this.fetch();if(-1!==e.endOfExpression.indexOf(a.text))break;if(r&&a.text===r)break;if(t&&Nn[a.text]&&Nn[a.text].infix)break;var i=this.parseAtom(r);if(!i)break;"internal"!==i.type&&n.push(i)}return"text"===this.mode&&this.formLigatures(n),this.handleInfixNodes(n)},t.handleInfixNodes=function(e){for(var t,r=-1,a=0;a=0&&this.settings.reportNonstrict("unicodeTextInMathMode",'Latin-1/Unicode text character "'+t[0]+'" used in math mode',e);var s,l=ae[this.mode][t].group,h=Lr.range(e);if(te.hasOwnProperty(l)){var m=l;s={type:"atom",mode:this.mode,family:m,loc:h,text:t}}else s={type:l,mode:this.mode,loc:h,text:t};i=s}else{if(!(t.charCodeAt(0)>=128))return null;this.settings.strict&&(S(t.charCodeAt(0))?"math"===this.mode&&this.settings.reportNonstrict("unicodeTextInMathMode",'Unicode text character "'+t[0]+'" used in math mode',e):this.settings.reportNonstrict("unknownSymbol",'Unrecognized Unicode character "'+t[0]+'" ('+t.charCodeAt(0)+")",e)),i={type:"textord",mode:"text",loc:Lr.range(e),text:t}}if(this.consume(),o)for(var c=0;c=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")},m=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")},v=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0 mjx-mid"]={"margin-top":this.em(-p/2),"margin-bottom":this.em(-p/2)}}l&&(h["border-top-width"]=this.em0(l-.03)),u&&(h["border-bottom-width"]=this.em0(u-.03),t[f+"mjx-stretchy-v"+e+" > mjx-end"]={"margin-top":this.em(-u)}),Object.keys(h).length&&(t[f+"mjx-stretchy-v"+e+" > 
mjx-ext"]=h)},s.prototype.addDelimiterVPart=function(t,e,r,n,i){if(!i)return 0;var o=this.getDelimiterData(i),a=(r-o[2])/2,s={content:this.charContent(i)};return"ext"!==n?s.padding=this.padding(o,a):a&&(s["padding-left"]=this.em0(a)),t[this.cssRoot+"mjx-stretchy-v"+e+" mjx-"+n+" mjx-c::before"]=s,o[0]+o[1]},s.prototype.addDelimiterHStyles=function(t,e,r){var n=v(r.stretch,4),i=n[0],o=n[1],a=n[2],s=n[3];this.addDelimiterHPart(t,e,"beg",i),this.addDelimiterHPart(t,e,"ext",o,!(i||a)),this.addDelimiterHPart(t,e,"end",a),s&&(this.addDelimiterHPart(t,e,"mid",s),t[this.cssRoot+"mjx-stretchy-h"+e+" > mjx-ext"]={width:"50%"})},s.prototype.addDelimiterHPart=function(t,e,r,n,i){if(void 0===i&&(i=!1),!n)return 0;var o=this.getDelimiterData(n),a=o[3],s={content:a&&a.c?'"'+a.c+'"':this.charContent(n)};"ext"===r&&!i||(s.padding=this.padding(o,0,-o[2])),t[this.cssRoot+"mjx-stretchy-h"+e+" mjx-"+r+" mjx-c::before"]=s},s.prototype.addCharStyles=function(t,e,r,n,i){var o=v(n,4),a=(o[0],o[1],o[2]),s=o[3];if(!this.options.adaptiveCSS||s.used){var c={},l="mjx-c"+this.charSelector(r),u=this.cssRoot;c.padding=this.padding(n,0,s.ic||0);var h=s.c?'"'+s.c+'"':this.charContent(r);i.get(r)!==h&&(i.has(r)||s.c?t[u+e+" "+l+"::before"]={content:h}:(t[u+l+"::before"]={content:h},i.set(r,h))),void 0!==s.f&&(c["font-family"]="MJXZERO, MJXTEX"+(s.f?"-"+s.f:""));var f=(e?e+" ":"")+l;if(t[u+f]=c,s.ic){var p=v([u+"mjx-","[noIC]"+f+":last-child"],2),d=p[0],m=p[1];t[d+"mi"+m]=t[d+"mo"+m]={"padding-right":this.em(a)}}}},s.prototype.getDelimiterData=function(t){return this.getChar("-smallop",t)},s.charOptions=function(t,e){return h.charOptions.call(this,t,e)},s.prototype.em=function(t){return o.em(t)},s.prototype.em0=function(t){return o.em(Math.max(0,t))},s.prototype.padding=function(t,e,r){var n=v(t,3),i=n[0],o=n[1];return void 0===e&&(e=0),void 0===r&&(r=0),[i,n[2]+r,o,e].map(this.em0).join(" ")},s.prototype.charContent=function(t){return'"'+(32<=t&&t<=126&&34!==t&&39!==t&&92!==t?String.fromCharCode(t):"\\"+t.toString(16).toUpperCase())+'"'},s.prototype.charSelector=function(t){return".mjx-c"+t.toString(16).toUpperCase()},s.OPTIONS={fontURL:"js/output/chtml/fonts/tex-woff-v2"},s.defaultVariantClasses={},s.defaultStyles={"mjx-c::before":{display:"inline-block",width:0}},s.defaultFonts={"@font-face /* 0 */":{"font-family":"MJXZERO",src:'url("%%URL%%/MathJax_Zero.woff") format("woff")'}},s);function s(t){var e,r;void 0===t&&(t=null);var n=h.call(this)||this;n.cssRoot="";var i=n.constructor;n.options=u.userOptions(u.defaultOptions({},i.OPTIONS),t);try{for(var o=y(Object.keys(i.defaultVariantClasses)),a=o.next();!a.done;a=o.next()){var s=a.value;n.variant[s].classes=i.defaultVariantClasses[s]}}catch(t){e={error:t}}finally{try{a&&!a.done&&(r=o.return)&&r.call(o)}finally{if(e)throw e.error}}return n}r.CHTMLFontData=a,r.AddCSS=function(t,e){var r,n;try{for(var i=y(Object.keys(e)),o=i.next();!o.done;o=i.next()){var a=o.value,s=parseInt(a);Object.assign(c.FontData.charOptions(t,s),e[s])}}catch(t){r={error:t}}finally{try{o&&!o.done&&(n=i.return)&&n.call(i)}finally{if(r)throw r.error}}return t}},function(t,u,e){"use strict";var n,r,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),h=this&&this.__values||function(t){var e="function"==typeof 
Symbol&&Symbol.iterator,r=e&&t[e],n=0;if(r)return r.call(t);if(t&&"number"==typeof t.length)return{next:function(){return t&&n>=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")},f=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")},r=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};function s(t,e){var r,n;try{for(var i=l(Object.keys(e)),o=i.next();!o.done;o=i.next()){var a=o.value;"__esModule"!==a&&("object"==typeof t[a]&&"object"==typeof e[a]?s(t[a],e[a]):null!==e[a]&&void 0!==e[a]&&(t[a]=e[a]))}}catch(t){r={error:t}}finally{try{o&&!o.done&&(n=i.return)&&n.call(i)}finally{if(r)throw r.error}}return t}Object.defineProperty(e,"__esModule",{value:!0}),e.combineConfig=s,e.combineDefaults=function t(e,r,n){var i,o;e[r]||(e[r]={}),e=e[r];try{for(var a=l(Object.keys(n)),s=a.next();!s.done;s=a.next()){var c=s.value;"object"==typeof e[c]&&"object"==typeof n[c]?t(e,c,n[c]):null==e[c]&&null!=n[c]&&(e[c]=n[c])}}catch(t){i={error:t}}finally{try{s&&!s.done&&(o=a.return)&&o.call(a)}finally{if(i)throw i.error}}return e},e.combineWithMathJax=function(t){return s(e.MathJax,t)},void 0===t.MathJax&&(t.MathJax={}),t.MathJax.version||(t.MathJax={version:"3.0.0",_:{},config:t.MathJax}),e.MathJax=t.MathJax}).call(this,r(28))},function(t,e,r){"use strict";var l=this&&this.__values||function(t){var e="function"==typeof Symbol&&Symbol.iterator,r=e&&t[e],n=0;if(r)return r.call(t);if(t&&"number"==typeof t.length)return{next:function(){return t&&n>=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")},n=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var n,i,a,o,s,c,h,f=r(0),p=r(6),d=r(21),m=r(4),y=r(12);function v(t,e){void 0===e&&(e=!1);var r=t.match(e?h:c);return r?[r[1].replace(/,/,"."),r[4],r[0].length]:[null,null,0]}function b(t,e,r){"{"!==e&&"}"!==e||(e="\\"+e);var n="{\\bigg"+r+" "+e+"}",i="{\\big"+r+" "+e+"}";return new d.default("\\mathchoice"+n+i+i+i,{},t).mml()}function g(t,e,r){e=e.replace(/^\s+/,y.entities.nbsp).replace(/\s+$/,y.entities.nbsp);var n=t.create("text",e);return t.create("node","mtext",[],r,n)}function M(t,e,r){if(r.match(/^[a-z]/i)&&e.match(/(^|[^\\])(\\\\)*\\[a-z]+$/i)&&(e+=" "),e.length+r.length>t.configuration.options.maxBuffer)throw new m.default("MaxBufferSize","MathJax internal buffer size exceeded; is there a recursive macro call?");return e+r}function O(t,e){for(;0e.length)throw new m.default("IllegalMacroParam","Illegal macro parameter reference");i=M(t,M(t,i,n),e[parseInt(a,10)-1]),n=""}else n+=a}return M(t,i,n)},i.addArgs=M,i.checkEqnEnv=function(t){if(t.stack.global.eqnenv)throw new m.default("ErroneousNestingEq","Erroneous nesting of equation 
structures");t.stack.global.eqnenv=!0},i.MmlFilterAttribute=function(t,e,r){return r},i.getFontDef=function(t){var e=t.stack.env.font;return e?{mathvariant:e}:{}},i.keyvalOptions=function(t,e,r){var n,i;void 0===e&&(e=null),void 0===r&&(r=!1);var o=function(t){for(var e,r,n,i,o,a={},s=t;s;)e=l(x(s,["=",","]),3),i=e[0],n=e[1],s=e[2],"="===n?(r=l(x(s,[","]),3),o=r[0],n=r[1],s=r[2],o="false"===o||"true"===o?JSON.parse(o):o,a[i]=o):i&&(a[i]=!0);return a}(t);if(e)try{for(var a=u(Object.keys(o)),s=a.next();!s.done;s=a.next()){var c=s.value;if(!e.hasOwnProperty(c)){if(r)throw new m.default("InvalidOption","Invalid optional argument: %1",c);delete o[c]}}}catch(t){n={error:t}}finally{try{s&&!s.done&&(i=a.return)&&i.call(a)}finally{if(n)throw n.error}}return o},e.default=n},function(t,e,r){"use strict";var n,i,o,l=this&&this.__values||function(t){var e="function"==typeof Symbol&&Symbol.iterator,r=e&&t[e],n=0;if(r)return r.call(t);if(t&&"number"==typeof t.length)return{next:function(){return t&&n>=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")},u=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")},c=this&&this.__spread||function(){for(var t=[],e=0;e=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var b,n,i,o=r(33),a=r(8),L=r(3),s=r(9),g=r(8),M=r(25),c=(l.create=function(t,e){return void 0===e&&(e={}),new l(t,e.handler||{},e.fallback||{},e.items||{},e.tags||{},e.options||{},e.nodes||{},e.preprocessors||[],e.postprocessors||[],[e.init,e.priority],[e.config,e.configPriority])},l.empty=function(){return l.create("empty")},l.extension=function(){return new s.MacroMap(a.ExtensionMaps.NEW_MACRO,{},{}),new s.DelimiterMap(a.ExtensionMaps.NEW_DELIMITER,o.default.delimiter,{}),new s.CommandMap(a.ExtensionMaps.NEW_COMMAND,{},{}),new s.EnvironmentMap(a.ExtensionMaps.NEW_ENVIRONMENT,o.default.environment,{},{}),l.create("extension",{handler:{character:[],delimiter:[a.ExtensionMaps.NEW_DELIMITER],macro:[a.ExtensionMaps.NEW_DELIMITER,a.ExtensionMaps.NEW_COMMAND,a.ExtensionMaps.NEW_MACRO],environment:[a.ExtensionMaps.NEW_ENVIRONMENT]}})},l.prototype.init=function(t){this.initMethod.execute(t)},l.prototype.config=function(t,e){var r,n,i,o;this.configMethod.execute(t,e);try{for(var a=I(this.preprocessors),s=a.next();!s.done;s=a.next()){var c=s.value;"function"==typeof c?e.preFilters.add(c):e.preFilters.add(c[0],c[1])}}catch(t){r={error:t}}finally{try{s&&!s.done&&(n=a.return)&&n.call(a)}finally{if(r)throw r.error}}try{for(var l=I(this.postprocessors),u=l.next();!u.done;u=l.next()){var h=u.value;"function"==typeof h?e.postFilters.add(h):e.postFilters.add(h[0],h[1])}}catch(t){i={error:t}}finally{try{u&&!u.done&&(o=l.return)&&o.call(l)}finally{if(i)throw i.error}}},l.prototype.append=function(t){var e,r,n,i,o,a,s,c,l,u,h,f,p=Object.keys(t.handler);try{for(var d=I(p),m=d.next();!m.done;m=d.next()){var y=m.value;try{for(var v=(n=void 0,I(t.handler[y])),b=v.next();!b.done;b=v.next()){var g=b.value;this.handler[y].unshift(g)}}catch(t){n={error:t}}finally{try{b&&!b.done&&(i=v.return)&&i.call(v)}finally{if(n)throw 
n.error}}}}catch(t){e={error:t}}finally{try{m&&!m.done&&(r=d.return)&&r.call(d)}finally{if(e)throw e.error}}Object.assign(this.fallback,t.fallback),Object.assign(this.items,t.items),Object.assign(this.tags,t.tags),L.defaultOptions(this.options,t.options),Object.assign(this.nodes,t.nodes);try{for(var M=I(t.preprocessors),O=M.next();!O.done;O=M.next()){var x=O.value;this.preprocessors.push(x)}}catch(t){o={error:t}}finally{try{O&&!O.done&&(a=M.return)&&a.call(M)}finally{if(o)throw o.error}}try{for(var S=I(t.postprocessors),E=S.next();!E.done;E=S.next()){var C=E.value;this.postprocessors.push(C)}}catch(t){s={error:t}}finally{try{E&&!E.done&&(c=S.return)&&c.call(S)}finally{if(s)throw s.error}}try{for(var _=I(t.initMethod),T=_.next();!T.done;T=_.next()){var w=T.value;this.initMethod.add(w.item,w.priority)}}catch(t){l={error:t}}finally{try{T&&!T.done&&(u=_.return)&&u.call(_)}finally{if(l)throw l.error}}try{for(var A=I(t.configMethod),k=A.next();!k.done;k=A.next())w=k.value,this.configMethod.add(w.item,w.priority)}catch(t){h={error:t}}finally{try{k&&!k.done&&(f=A.return)&&f.call(A)}finally{if(h)throw h.error}}},l.prototype.register=function(t,e,r){var n,i,o,a,s,c;void 0===r&&(r={}),this.append(t),t.init(this);var l=e.parseOptions;l.handlers=new g.SubHandlers(this),l.nodeFactory.setCreators(t.nodes);try{for(var u=I(Object.keys(t.items)),h=u.next();!h.done;h=u.next()){var f=h.value;l.itemFactory.setNodeClass(f,t.items[f])}}catch(t){n={error:t}}finally{try{h&&!h.done&&(i=u.return)&&i.call(u)}finally{if(n)throw n.error}}L.defaultOptions(l.options,t.options),L.userOptions(l.options,r),t.config(this,e);try{for(var p=I(t.preprocessors),d=p.next();!d.done;d=p.next()){var m=d.value;Array.isArray(m)?e.preFilters.add(m[0],m[1]):e.preFilters.add(m)}}catch(t){o={error:t}}finally{try{d&&!d.done&&(a=p.return)&&a.call(p)}finally{if(o)throw o.error}}try{for(var y=I(t.postprocessors),v=y.next();!v.done;v=y.next()){var b=v.value;Array.isArray(b)?e.postFilters.add(b[0],b[1]):e.postFilters.add(b)}}catch(t){s={error:t}}finally{try{v&&!v.done&&(c=y.return)&&c.call(y)}finally{if(s)throw s.error}}},l);function l(t,e,r,n,i,o,a,s,c,l,u){void 0===e&&(e={}),void 0===r&&(r={}),void 0===n&&(n={}),void 0===i&&(i={}),void 0===o&&(o={}),void 0===a&&(a={}),void 0===s&&(s=[]),void 0===c&&(c=[]);var h=v(l,2),f=h[0],p=h[1],d=v(u,2),m=d[0],y=d[1];this.name=t,this.handler=e,this.fallback=r,this.items=n,this.tags=i,this.options=o,this.nodes=a,this.preprocessors=s,this.postprocessors=c,this.initMethod=new M.FunctionList,this.configMethod=new M.FunctionList,f&&this.initMethod.add(f,p||0),m&&this.configMethod.add(m,y||p||0),this.handler=Object.assign({character:[],delimiter:[],macro:[],environment:[]},e),b.set(t,this)}e.Configuration=c,n=b=e.ConfigurationHandler||(e.ConfigurationHandler={}),i=new Map,n.set=function(t,e){i.set(t,e)},n.get=function(t){return i.get(t)},n.keys=function(){return i.keys()}},function(t,n,e){"use strict";Object.defineProperty(n,"__esModule",{value:!0});var 
i=e(69),o=e(103);n.options={loadMissingEntities:!0},n.entities={ApplyFunction:"\u2061",Backslash:"\u2216",Because:"\u2235",Breve:"\u02d8",Cap:"\u22d2",CenterDot:"\xb7",CircleDot:"\u2299",CircleMinus:"\u2296",CirclePlus:"\u2295",CircleTimes:"\u2297",Congruent:"\u2261",ContourIntegral:"\u222e",Coproduct:"\u2210",Cross:"\u2a2f",Cup:"\u22d3",CupCap:"\u224d",Dagger:"\u2021",Del:"\u2207",Delta:"\u0394",Diamond:"\u22c4",DifferentialD:"\u2146",DotEqual:"\u2250",DoubleDot:"\xa8",DoubleRightTee:"\u22a8",DoubleVerticalBar:"\u2225",DownArrow:"\u2193",DownLeftVector:"\u21bd",DownRightVector:"\u21c1",DownTee:"\u22a4",Downarrow:"\u21d3",Element:"\u2208",EqualTilde:"\u2242",Equilibrium:"\u21cc",Exists:"\u2203",ExponentialE:"\u2147",FilledVerySmallSquare:"\u25aa",ForAll:"\u2200",Gamma:"\u0393",Gg:"\u22d9",GreaterEqual:"\u2265",GreaterEqualLess:"\u22db",GreaterFullEqual:"\u2267",GreaterLess:"\u2277",GreaterSlantEqual:"\u2a7e",GreaterTilde:"\u2273",Hacek:"\u02c7",Hat:"^",HumpDownHump:"\u224e",HumpEqual:"\u224f",Im:"\u2111",ImaginaryI:"\u2148",Integral:"\u222b",Intersection:"\u22c2",InvisibleComma:"\u2063",InvisibleTimes:"\u2062",Lambda:"\u039b",Larr:"\u219e",LeftAngleBracket:"\u27e8",LeftArrow:"\u2190",LeftArrowRightArrow:"\u21c6",LeftCeiling:"\u2308",LeftDownVector:"\u21c3",LeftFloor:"\u230a",LeftRightArrow:"\u2194",LeftTee:"\u22a3",LeftTriangle:"\u22b2",LeftTriangleEqual:"\u22b4",LeftUpVector:"\u21bf",LeftVector:"\u21bc",Leftarrow:"\u21d0",Leftrightarrow:"\u21d4",LessEqualGreater:"\u22da",LessFullEqual:"\u2266",LessGreater:"\u2276",LessSlantEqual:"\u2a7d",LessTilde:"\u2272",Ll:"\u22d8",Lleftarrow:"\u21da",LongLeftArrow:"\u27f5",LongLeftRightArrow:"\u27f7",LongRightArrow:"\u27f6",Longleftarrow:"\u27f8",Longleftrightarrow:"\u27fa",Longrightarrow:"\u27f9",Lsh:"\u21b0",MinusPlus:"\u2213",NestedGreaterGreater:"\u226b",NestedLessLess:"\u226a",NotDoubleVerticalBar:"\u2226",NotElement:"\u2209",NotEqual:"\u2260",NotExists:"\u2204",NotGreater:"\u226f",NotGreaterEqual:"\u2271",NotLeftTriangle:"\u22ea",NotLeftTriangleEqual:"\u22ec",NotLess:"\u226e",NotLessEqual:"\u2270",NotPrecedes:"\u2280",NotPrecedesSlantEqual:"\u22e0",NotRightTriangle:"\u22eb",NotRightTriangleEqual:"\u22ed",NotSubsetEqual:"\u2288",NotSucceeds:"\u2281",NotSucceedsSlantEqual:"\u22e1",NotSupersetEqual:"\u2289",NotTilde:"\u2241",NotVerticalBar:"\u2224",Omega:"\u03a9",OverBar:"\u203e",OverBrace:"\u23de",PartialD:"\u2202",Phi:"\u03a6",Pi:"\u03a0",PlusMinus:"\xb1",Precedes:"\u227a",PrecedesEqual:"\u2aaf",PrecedesSlantEqual:"\u227c",PrecedesTilde:"\u227e",Product:"\u220f",Proportional:"\u221d",Psi:"\u03a8",Rarr:"\u21a0",Re:"\u211c",ReverseEquilibrium:"\u21cb",RightAngleBracket:"\u27e9",RightArrow:"\u2192",RightArrowLeftArrow:"\u21c4",RightCeiling:"\u2309",RightDownVector:"\u21c2",RightFloor:"\u230b",RightTee:"\u22a2",RightTeeArrow:"\u21a6",RightTriangle:"\u22b3",RightTriangleEqual:"\u22b5",RightUpVector:"\u21be",RightVector:"\u21c0",Rightarrow:"\u21d2",Rrightarrow:"\u21db",Rsh:"\u21b1",Sigma:"\u03a3",SmallCircle:"\u2218",Sqrt:"\u221a",Square:"\u25a1",SquareIntersection:"\u2293",SquareSubset:"\u228f",SquareSubsetEqual:"\u2291",SquareSuperset:"\u2290",SquareSupersetEqual:"\u2292",SquareUnion:"\u2294",Star:"\u22c6",Subset:"\u22d0",SubsetEqual:"\u2286",Succeeds:"\u227b",SucceedsEqual:"\u2ab0",SucceedsSlantEqual:"\u227d",SucceedsTilde:"\u227f",SuchThat:"\u220b",Sum:"\u2211",Superset:"\u2283",SupersetEqual:"\u2287",Supset:"\u22d1",Therefore:"\u2234",Theta:"\u0398",Tilde:"\u223c",TildeEqual:"\u2243",TildeFullEqual:"\u2245",TildeTilde:"\u2248",UnderBar:"_",UnderBra
ce:"\u23df",Union:"\u22c3",UnionPlus:"\u228e",UpArrow:"\u2191",UpDownArrow:"\u2195",UpTee:"\u22a5",Uparrow:"\u21d1",Updownarrow:"\u21d5",Upsilon:"\u03a5",Vdash:"\u22a9",Vee:"\u22c1",VerticalBar:"\u2223",VerticalTilde:"\u2240",Vvdash:"\u22aa",Wedge:"\u22c0",Xi:"\u039e",amp:"&",acute:"\xb4",aleph:"\u2135",alpha:"\u03b1",amalg:"\u2a3f",and:"\u2227",ang:"\u2220",angmsd:"\u2221",angsph:"\u2222",ape:"\u224a",backprime:"\u2035",backsim:"\u223d",backsimeq:"\u22cd",beta:"\u03b2",beth:"\u2136",between:"\u226c",bigcirc:"\u25ef",bigodot:"\u2a00",bigoplus:"\u2a01",bigotimes:"\u2a02",bigsqcup:"\u2a06",bigstar:"\u2605",bigtriangledown:"\u25bd",bigtriangleup:"\u25b3",biguplus:"\u2a04",blacklozenge:"\u29eb",blacktriangle:"\u25b4",blacktriangledown:"\u25be",blacktriangleleft:"\u25c2",bowtie:"\u22c8",boxdl:"\u2510",boxdr:"\u250c",boxminus:"\u229f",boxplus:"\u229e",boxtimes:"\u22a0",boxul:"\u2518",boxur:"\u2514",bsol:"\\",bull:"\u2022",cap:"\u2229",check:"\u2713",chi:"\u03c7",circ:"\u02c6",circeq:"\u2257",circlearrowleft:"\u21ba",circlearrowright:"\u21bb",circledR:"\xae",circledS:"\u24c8",circledast:"\u229b",circledcirc:"\u229a",circleddash:"\u229d",clubs:"\u2663",colon:":",comp:"\u2201",ctdot:"\u22ef",cuepr:"\u22de",cuesc:"\u22df",cularr:"\u21b6",cup:"\u222a",curarr:"\u21b7",curlyvee:"\u22ce",curlywedge:"\u22cf",dagger:"\u2020",daleth:"\u2138",ddarr:"\u21ca",deg:"\xb0",delta:"\u03b4",digamma:"\u03dd",div:"\xf7",divideontimes:"\u22c7",dot:"\u02d9",doteqdot:"\u2251",dotplus:"\u2214",dotsquare:"\u22a1",dtdot:"\u22f1",ecir:"\u2256",efDot:"\u2252",egs:"\u2a96",ell:"\u2113",els:"\u2a95",empty:"\u2205",epsi:"\u03b5",epsiv:"\u03f5",erDot:"\u2253",eta:"\u03b7",eth:"\xf0",flat:"\u266d",fork:"\u22d4",frown:"\u2322",gEl:"\u2a8c",gamma:"\u03b3",gap:"\u2a86",gimel:"\u2137",gnE:"\u2269",gnap:"\u2a8a",gne:"\u2a88",gnsim:"\u22e7",gt:">",gtdot:"\u22d7",harrw:"\u21ad",hbar:"\u210f",hellip:"\u2026",hookleftarrow:"\u21a9",hookrightarrow:"\u21aa",imath:"\u0131",infin:"\u221e",intcal:"\u22ba",iota:"\u03b9",jmath:"\u0237",kappa:"\u03ba",kappav:"\u03f0",lEg:"\u2a8b",lambda:"\u03bb",lap:"\u2a85",larrlp:"\u21ab",larrtl:"\u21a2",lbrace:"{",lbrack:"[",le:"\u2264",leftleftarrows:"\u21c7",leftthreetimes:"\u22cb",lessdot:"\u22d6",lmoust:"\u23b0",lnE:"\u2268",lnap:"\u2a89",lne:"\u2a87",lnsim:"\u22e6",longmapsto:"\u27fc",looparrowright:"\u21ac",lowast:"\u2217",loz:"\u25ca",lt:"<",ltimes:"\u22c9",ltri:"\u25c3",macr:"\xaf",malt:"\u2720",mho:"\u2127",mu:"\u03bc",multimap:"\u22b8",nLeftarrow:"\u21cd",nLeftrightarrow:"\u21ce",nRightarrow:"\u21cf",nVDash:"\u22af",nVdash:"\u22ae",natur:"\u266e",nearr:"\u2197",nharr:"\u21ae",nlarr:"\u219a",not:"\xac",nrarr:"\u219b",nu:"\u03bd",nvDash:"\u22ad",nvdash:"\u22ac",nwarr:"\u2196",omega:"\u03c9",omicron:"\u03bf",or:"\u2228",osol:"\u2298",period:".",phi:"\u03c6",phiv:"\u03d5",pi:"\u03c0",piv:"\u03d6",prap:"\u2ab7",precnapprox:"\u2ab9",precneqq:"\u2ab5",precnsim:"\u22e8",prime:"\u2032",psi:"\u03c8",quot:'"',rarrtl:"\u21a3",rbrace:"}",rbrack:"]",rho:"\u03c1",rhov:"\u03f1",rightrightarrows:"\u21c9",rightthreetimes:"\u22cc",ring:"\u02da",rmoust:"\u23b1",rtimes:"\u22ca",rtri:"\u25b9",scap:"\u2ab8",scnE:"\u2ab6",scnap:"\u2aba",scnsim:"\u22e9",sdot:"\u22c5",searr:"\u2198",sect:"\xa7",sharp:"\u266f",sigma:"\u03c3",sigmav:"\u03c2",simne:"\u2246",smile:"\u2323",spades:"\u2660",sub:"\u2282",subE:"\u2ac5",subnE:"\u2acb",subne:"\u228a",supE:"\u2ac6",supnE:"\u2acc",supne:"\u228b",swarr:"\u2199",tau:"\u03c4",theta:"\u03b8",thetav:"\u03d1",tilde:"\u02dc",times:"\xd7",triangle:"\u25b5",triangleq:"\u225c",upsi:"\u03c5",upuparr
ows:"\u21c8",veebar:"\u22bb",vellip:"\u22ee",weierp:"\u2118",xi:"\u03be",yen:"\xa5",zeta:"\u03b6",zigrarr:"\u21dd"};var a={};function r(t,e){if("#"===e.charAt(0))return s(e.slice(1));if(n.entities[e])return n.entities[e];if(n.options.loadMissingEntities){var r=e.match(/^[a-zA-Z](fr|scr|opf)$/)?RegExp.$1:e.charAt(0).toLowerCase();a[r]||(a[r]=!0,i.retryAfter(o.asyncLoad("./util/entities/"+r+".js")))}return t}function s(t){var e="x"===t.charAt(0)?parseInt(t.slice(1),16):parseInt(t);if(e<65536)return String.fromCharCode(e);var r=55296+((e-=65536)>>10),n=56320+(1023&e);return String.fromCharCode(r,n)}n.add=function(t,e){Object.assign(n.entities,t),a[e]=!0},n.remove=function(t){delete n.entities[t]},n.translate=function(t){return t.replace(/&([a-z][a-z0-9]*|#(?:[0-9]+|x[0-9a-f]+));/gi,r)},n.numeric=s},function(t,o,e){"use strict";Object.defineProperty(o,"__esModule",{value:!0}),o.protoItem=function(t,e,r,n,i,o,a){return void 0===a&&(a=null),{open:t,math:e,close:r,n:n,start:{n:i},end:{n:o},display:a}};var r=(n.prototype.render=function(t){t.renderActions.renderMath(this,t)},n.prototype.rerender=function(t,e){void 0===e&&(e=o.STATE.RERENDER),this.state()>=e&&this.state(e-1),t.renderActions.renderMath(this,t,e)},n.prototype.convert=function(t,e){void 0===e&&(e=o.STATE.LAST),t.renderActions.renderConvert(this,t,e)},n.prototype.compile=function(t){this.state()=o.STATE.INSERTED&&this.removeFromDocument(e),t=o.STATE.TYPESET&&(this.bbox={},this.outputData={}),t=o.STATE.COMPILED&&(this.inputData={}),this._state=t),this._state},n.prototype.reset=function(t){void 0===t&&(t=!1),this.state(o.STATE.UNPROCESSED)},n);function n(t,e,r,n,i){void 0===r&&(r=!0),void 0===n&&(n={i:0,n:0,delim:""}),void 0===i&&(i={i:0,n:0,delim:""}),this.root=null,this.typesetRoot=null,this._state=o.STATE.UNPROCESSED,this.metrics={},this.bbox={},this.inputData={},this.outputData={},this.math=t,this.inputJax=e,this.display=r,this.start=n,this.end=i,this.root=null,this.typesetRoot=null,this.metrics={},this.bbox={},this.inputData={},this.outputData={}}o.AbstractMathItem=r,o.STATE={UNPROCESSED:0,FINDMATH:10,COMPILED:20,CONVERT:100,METRICS:110,RERENDER:125,TYPESET:150,INSERTED:200,RESET:500,LAST:1e4},o.newState=function(t,e){if(t in o.STATE)throw Error("State "+t+" already exists");o.STATE[t]=e}},function(t,s,e){"use strict";Object.defineProperty(s,"__esModule",{value:!0}),s.BIGDIMEN=1e6,s.UNITS={px:1,pt:96/72,pc:8,in:96,cm:96/2.54,mm:96/25.4},s.RELUNITS={em:1,ex:.431,mu:1/18},s.MATHSPACE={veryverythinmathspace:1/18,verythinmathspace:2/18,thinmathspace:3/18,mediummathspace:4/18,thickmathspace:5/18,verythickmathspace:6/18,veryverythickmathspace:7/18,negativeveryverythinmathspace:-1/18,negativeverythinmathspace:-2/18,negativethinmathspace:-3/18,negativemediummathspace:-4/18,negativethickmathspace:-5/18,negativeverythickmathspace:-6/18,negativeveryverythickmathspace:-7/18,thin:.04,medium:.06,thick:.1,normal:1,big:2,small:1/Math.sqrt(2),infinity:s.BIGDIMEN},s.length2em=function(t,e,r,n){if(void 0===e&&(e=0),void 0===r&&(r=1),void 0===n&&(n=16),"string"!=typeof t&&(t=String(t)),""===t||null==t)return e;if(s.MATHSPACE[t])return s.MATHSPACE[t];var i=t.match(/^\s*([-+]?(?:\.\d+|\d+(?:\.\d*)?))?(pt|em|ex|mu|px|pc|in|mm|cm|%)?/);if(!i)return e;var o=parseFloat(i[1]||"1"),a=i[2];return s.UNITS.hasOwnProperty(a)?o*s.UNITS[a]/n/r:s.RELUNITS.hasOwnProperty(a)?o*s.RELUNITS[a]:"%"===a?o/100*e:o*e},s.percent=function(t){return(100*t).toFixed(1).replace(/\.?0+$/,"")+"%"},s.em=function(t){return 
Math.abs(t)<.001?"0":t.toFixed(3).replace(/\.?0+$/,"")+"em"},s.emRounded=function(t,e){return void 0===e&&(e=16),t=(Math.round(t*e)+.05)/e,Math.abs(t)<.001?"0em":t.toFixed(3).replace(/\.?0+$/,"")+"em"},s.px=function(t,e,r){return void 0===e&&(e=-s.BIGDIMEN),void 0===r&&(r=16),t*=r,e&&tthis.w&&(this.w=i),o>this.h&&(this.h=o),a>this.d&&(this.d=a)},o.prototype.append=function(t){var e=t.rscale;this.w+=e*(t.w+t.L+t.R),e*t.h>this.h&&(this.h=e*t.h),e*t.d>this.d&&(this.d=e*t.d)},o.prototype.updateFrom=function(t){this.h=t.h,this.d=t.d,this.w=t.w,t.pwidth&&(this.pwidth=t.pwidth)},o.fullWidth="100%",o);function o(t){void 0===t&&(t={w:0,h:-n.BIGDIMEN,d:-n.BIGDIMEN}),this.w=t.w||0,this.h="h"in t?t.h:-n.BIGDIMEN,this.d="d"in t?t.d:-n.BIGDIMEN,this.L=this.R=this.ic=this.sk=0,this.scale=this.rscale=1,this.pwidth=""}e.BBox=i},function(t,h,o){"use strict";(function(r){var l=this&&this.__values||function(t){var e="function"==typeof Symbol&&Symbol.iterator,r=e&&t[e],n=0;if(r)return r.call(t);if(t&&"number"==typeof t.length)return{next:function(){return t&&n>=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(h,"__esModule",{value:!0});var t,e,n=o(5),u=o(18),i=o(18);h.Package=i.Package,h.PackageError=i.PackageError,(e=t=h.Loader||(h.Loader={})).ready=function(){for(var e,t,r=[],n=0;n=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var a,p=r(17),s=(a=Error,i(c,a),c);function c(t,e){var r=a.call(this,t)||this;return r.package=e,r}e.PackageError=s;var l=(d.resolvePath=function(t,e){void 0===e&&(e=!0);var r,n=p.CONFIG.source[t]||t;for(n.match(/^(?:[a-z]+:\/)?\/|\[/)||(n="[mathjax]/"+n.replace(/^\.\//,"")),e&&!n.match(/\.[^\/]+$/)&&(n+=".js");(r=n.match(/^\[([^\]]*)\]/))&&p.CONFIG.paths.hasOwnProperty(r[1]);)n=p.CONFIG.paths[r[1]]+n.substr(r[0].length);return n},Object.defineProperty(d.prototype,"canLoad",{get:function(){return 0===this.dependencyCount&&!this.noLoad&&!this.isLoading&&!this.hasFailed},enumerable:!0,configurable:!0}),d.prototype.makeDependencies=function(){var e,t,r=[],n=d.packages,i=this.noLoad,o=this.name,a=[];p.CONFIG.dependencies.hasOwnProperty(o)?a.push.apply(a,h(p.CONFIG.dependencies[o])):"core"!==o&&a.push("core");try{for(var s=f(a),c=s.next();!c.done;c=s.next()){var l=c.value,u=n.get(l)||new d(l,i);this.dependencies.indexOf(u)<0&&(u.addDependent(this,i),this.dependencies.push(u),u.isLoaded||(this.dependencyCount++,r.push(u.promise)))}}catch(t){e={error:t}}finally{try{c&&!c.done&&(t=s.return)&&t.call(s)}finally{if(e)throw e.error}}return r},d.prototype.makePromise=function(t){var r=this,e=new Promise(function(t,e){r.resolve=t,r.reject=e}),n=p.CONFIG[this.name]||{};return n.ready&&(e=e.then(function(t){return n.ready(r.name)})),t.length&&(t.push(e),e=Promise.all(t).then(function(t){return t.join(", ")})),n.failed&&e.catch(function(t){return n.failed(new s(t,r.name))}),e},d.prototype.load=function(){if(!this.isLoaded&&!this.isLoading&&!this.noLoad){this.isLoading=!0;var t=d.resolvePath(this.name);p.CONFIG.require?this.loadCustom(t):this.loadScript(t)}},d.prototype.loadCustom=function(t){var e=this;try{var r=p.CONFIG.require(t);r instanceof Promise?r.then(function(){return e.checkLoad()}).catch(function(){return e.failed("Can't load \""+t+'"')}):this.checkLoad()}catch(t){this.failed(t.message)}},d.prototype.loadScript=function(e){var 
r=this,t=document.createElement("script");t.src=e,t.charset="UTF-8",t.onload=function(t){return r.checkLoad()},t.onerror=function(t){return r.failed("Can't load \""+e+'"')},document.head.appendChild(t)},d.prototype.loaded=function(){var e,t,r,n;this.isLoaded=!0,this.isLoading=!1;try{for(var i=f(this.dependents),o=i.next();!o.done;o=i.next())o.value.requirementSatisfied()}catch(t){e={error:t}}finally{try{o&&!o.done&&(t=i.return)&&t.call(i)}finally{if(e)throw e.error}}try{for(var a=f(this.provided),s=a.next();!s.done;s=a.next())s.value.loaded()}catch(t){r={error:t}}finally{try{s&&!s.done&&(n=a.return)&&n.call(a)}finally{if(r)throw r.error}}this.resolve(this.name)},d.prototype.failed=function(t){this.hasFailed=!0,this.isLoading=!1,this.reject(new s(t,this.name))},d.prototype.checkLoad=function(){var e=this;((p.CONFIG[this.name]||{}).checkReady||function(){return Promise.resolve()})().then(function(){return e.loaded()}).catch(function(t){return e.failed(t)})},d.prototype.requirementSatisfied=function(){this.dependencyCount&&(this.dependencyCount--,this.canLoad&&this.load())},d.prototype.provides=function(t){var e,r;void 0===t&&(t=[]);try{for(var n=f(t),i=n.next();!i.done;i=n.next()){var o=i.value,a=d.packages.get(o);a||(p.CONFIG.dependencies[o]||(p.CONFIG.dependencies[o]=[]),p.CONFIG.dependencies[o].push(o),(a=new d(o,!0)).isLoading=!0),this.provided.push(a)}}catch(t){e={error:t}}finally{try{i&&!i.done&&(r=n.return)&&r.call(n)}finally{if(e)throw e.error}}},d.prototype.addDependent=function(t,e){this.dependents.push(t),e||this.checkNoLoad()},d.prototype.checkNoLoad=function(){var e,t;if(this.noLoad){this.noLoad=!1;try{for(var r=f(this.dependencies),n=r.next();!n.done;n=r.next())n.value.checkNoLoad()}catch(t){e={error:t}}finally{try{n&&!n.done&&(t=r.return)&&t.call(r)}finally{if(e)throw e.error}}}},d.loadAll=function(){var e,t;try{for(var r=f(this.packages.values()),n=r.next();!n.done;n=r.next()){var i=n.value;i.canLoad&&i.load()}}catch(t){e={error:t}}finally{try{n&&!n.done&&(t=r.return)&&t.call(r)}finally{if(e)throw e.error}}},d.packages=new Map,d);function d(t,e){void 0===e&&(e=!1),this.isLoaded=!1,this.isLoading=!1,this.hasFailed=!1,this.dependents=[],this.dependencies=[],this.dependencyCount=0,this.provided=[],this.name=t,this.noLoad=e,d.packages.set(t,this),this.promise=this.makePromise(this.makeDependencies())}e.Package=l},function(t,r,e){"use strict";var c=this&&this.__values||function(t){var e="function"==typeof Symbol&&Symbol.iterator,r=e&&t[e],n=0;if(r)return r.call(t);if(t&&"number"==typeof t.length)return{next:function(){return t&&n>=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(r,"__esModule",{value:!0}),r.INHERIT="_inherit_";var n=(i.prototype.set=function(t,e){this.attributes[t]=e},i.prototype.setList=function(t){Object.assign(this.attributes,t)},i.prototype.get=function(t){var e=this.attributes[t];return e===r.INHERIT&&(e=this.global[t]),e},i.prototype.getExplicit=function(t){if(this.attributes.hasOwnProperty(t))return this.attributes[t]},i.prototype.getList=function(){for(var e,t,r=[],n=0;n=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")},s=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not 
iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0}),e.V=1,e.H=2,e.NOSTRETCH={dir:0};var i=(o.charOptions=function(t,e){var r=t[e];return 3===r.length&&(r[3]={}),r[3]},o.prototype.createVariant=function(t,e,r){void 0===e&&(e=null),void 0===r&&(r=null);var n={linked:[],chars:e?Object.create(this.variant[e].chars):{}};r&&this.variant[r]&&(Object.assign(n.chars,this.variant[r].chars),this.variant[r].linked.push(n.chars),n.chars=Object.create(n.chars)),this.variant[t]=n},o.prototype.createVariants=function(t){var e,r;try{for(var n=c(t),i=n.next();!i.done;i=n.next()){var o=i.value;this.createVariant(o[0],o[1],o[2])}}catch(t){e={error:t}}finally{try{i&&!i.done&&(r=n.return)&&r.call(n)}finally{if(e)throw e.error}}},o.prototype.defineChars=function(t,e){var r,n,i=this.variant[t];Object.assign(i.chars,e);try{for(var o=c(i.linked),a=o.next();!a.done;a=o.next()){var s=a.value;Object.assign(s,e)}}catch(t){r={error:t}}finally{try{a&&!a.done&&(n=o.return)&&n.call(o)}finally{if(r)throw r.error}}},o.prototype.defineDelimiters=function(t){Object.assign(this.delimiters,t)},o.prototype.defineRemap=function(t,e){this.remapChars.hasOwnProperty(t)||(this.remapChars[t]={}),Object.assign(this.remapChars[t],e)},o.prototype.getDelimiter=function(t){return this.delimiters[t]},o.prototype.getSizeVariant=function(t,e){return this.delimiters[t].variants&&(e=this.delimiters[t].variants[e]),this.sizeVariants[e]},o.prototype.getChar=function(t,e){return this.variant[t].chars[e]},o.prototype.getVariant=function(t){return this.variant[t]},o.prototype.getCssFont=function(t){return this.cssFontMap[t]||["serif",!1,!1]},o.prototype.getRemappedChar=function(t,e){return(this.remapChars[t]||{})[e]},o.OPTIONS={},o.defaultVariants=[["normal"],["bold","normal"],["italic","normal"],["bold-italic","italic","bold"],["double-struck","bold"],["fraktur","normal"],["bold-fraktur","bold","fraktur"],["script","normal"],["bold-script","bold","script"],["sans-serif","normal"],["bold-sans-serif","bold","sans-serif"],["sans-serif-italic","italic","sans-serif"],["bold-sans-serif-italic","bold-italic","sans-serif"],["monospace","normal"]],o.defaultCssFonts={normal:["serif",!1,!1],bold:["serif",!1,!0],italic:["serif",!0,!1],"bold-italic":["serif",!0,!0],"double-struck":["serif",!1,!0],fraktur:["serif",!1,!1],"bold-fraktur":["serif",!1,!0],script:["cursive",!1,!1],"bold-script":["cursive",!1,!0],"sans-serif":["sans-serif",!1,!1],"bold-sans-serif":["sans-serif",!1,!0],"sans-serif-italic":["sans-serif",!0,!1],"bold-sans-serif-italic":["sans-serif",!0,!0],monospace:["monospace",!1,!1]},o.defaultAccentMap={768:"\u02cb",769:"\u02ca",770:"\u02c6",771:"\u02dc",772:"\u02c9",774:"\u02d8",775:"\u02d9",776:"\xa8",778:"\u02da",780:"\u02c7",8594:"\u20d7",8242:"'",8243:"''",8244:"'''",8245:"`",8246:"``",8247:"```",8279:"''''",8400:"\u21bc",8401:"\u21c0",8406:"\u2190",8417:"\u2194",8432:"*",8411:"...",8412:"....",8428:"\u21c1",8429:"\u21bd",8430:"\u2190",8431:"\u2192"},o.defaultMoMap={45:"\u2212"},o.defaultMnMap={45:"\u2212"},o.defaultParams={x_height:.442,quad:1,num1:.676,num2:.394,num3:.444,denom1:.686,denom2:.345,sup1:.413,sup2:.363,sup3:.289,sub1:.15,sub2:.247,sup_drop:.386,sub_drop:.05,delim1:2.39,delim2:1,axis_height:.25,rule_thickness:.06,big_op_spacing1:.111,big_op_spacing2:.167,big_op_spacing3:.2,big_op_spacing4:.6,big_op_spacing5:.1,surd_height:.075,scriptspace:.05,nulldelimiterspace:.12,delimiterfactor:901,delimitershortfall:.3,min_rule_thickness:1.25},o.defaultDelimiters={},o.defaultChars={},o.defaultSizeVariant
s=[],o);function o(){var e,t;this.variant={},this.delimiters={},this.cssFontMap={},this.remapChars={};var r=this.constructor;this.params=a({},r.defaultParams),this.sizeVariants=s(r.defaultSizeVariants),this.cssFontMap=a({},r.defaultCssFonts),this.createVariants(r.defaultVariants),this.defineDelimiters(r.defaultDelimiters);try{for(var n=c(Object.keys(r.defaultChars)),i=n.next();!i.done;i=n.next()){var o=i.value;this.defineChars(o,r.defaultChars[o])}}catch(t){e={error:t}}finally{try{i&&!i.done&&(t=n.return)&&t.call(n)}finally{if(e)throw e.error}}this.defineRemap("accent",r.defaultAccentMap),this.defineRemap("mo",r.defaultMoMap),this.defineRemap("mn",r.defaultMnMap)}e.FontData=i},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=(i.prototype[Symbol.iterator]=function(){var t=0,e=this.items;return{next:function(){return{value:e[t++],done:t>e.length}}}},i.prototype.add=function(t,e){void 0===e&&(e=i.DEFAULTPRIORITY);for(var r=this.items.length;0<=--r&&e=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")},o=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var M,a=r(0),s=r(86),c=(M=a.AbstractMmlTokenNode,i(l,M),Object.defineProperty(l.prototype,"kind",{get:function(){return"mo"},enumerable:!0,configurable:!0}),Object.defineProperty(l.prototype,"isEmbellished",{get:function(){return!0},enumerable:!0,configurable:!0}),Object.defineProperty(l.prototype,"hasNewLine",{get:function(){return"newline"===this.attributes.get("linebreak")},enumerable:!0,configurable:!0}),l.prototype.coreParent=function(){for(var t=this,e=this.factory.getNodeClass("math");t&&t.isEmbellished&&t.coreMO()===this&&!(t instanceof e);)t=t.Parent;return t},l.prototype.coreText=function(t){if(!t)return"";if(t.isEmbellished)return t.coreMO().getText();for(;((t.isKind("mrow")||t.isKind("TeXAtom")||t.isKind("mstyle")||t.isKind("mphantom"))&&1===t.childNodes.length||t.isKind("munderover"))&&t.childNodes[0];)t=t.childNodes[0];return t.isToken?t.getText():""},l.prototype.hasSpacingAttributes=function(){return this.attributes.isSet("lspace")||this.attributes.isSet("rspace")},Object.defineProperty(l.prototype,"isAccent",{get:function(){var t=!1,e=this.coreParent();if(e){var r=e.isKind("mover")?e.childNodes[e.over].coreMO()?"accent":"":e.isKind("munder")?e.childNodes[e.under].coreMO()?"accentunder":"":e.isKind("munderover")?this===e.childNodes[e.over].coreMO()?"accent":this===e.childNodes[e.under].coreMO()?"accentunder":"":"";r&&(t=void 0!==e.attributes.getExplicit(r)?t:this.attributes.get("accent"))}return t},enumerable:!0,configurable:!0}),l.prototype.setTeXclass=function(t){var e=this.attributes.getList("form","fence"),r=e.form,n=e.fence;return this.attributes.isSet("lspace")||this.attributes.isSet("rspace")?(this.texClass=a.TEXCLASS.NONE,null):(n&&this.texClass===a.TEXCLASS.REL&&("prefix"===r&&(this.texClass=a.TEXCLASS.OPEN),"postfix"===r&&(this.texClass=a.TEXCLASS.CLOSE)),"\u2061"===this.getText()?(t&&(t.texClass=a.TEXCLASS.OP,t.setProperty("fnOP",!0)),this.texClass=this.prevClass=a.TEXCLASS.NONE,t):this.adjustTeXclass(t))},l.prototype.adjustTeXclass=function(t){var e=this.texClass,r=this.prevClass;if(e===a.TEXCLASS.NONE)return 
t;if(t?(!t.getProperty("autoOp")||e!==a.TEXCLASS.BIN&&e!==a.TEXCLASS.REL||(e=this.texClass=a.TEXCLASS.ORD),r=this.prevClass=t.texClass||a.TEXCLASS.ORD,this.prevLevel=this.attributes.getInherited("scriptlevel")):r=this.prevClass=a.TEXCLASS.NONE,e!==a.TEXCLASS.BIN||r!==a.TEXCLASS.NONE&&r!==a.TEXCLASS.BIN&&r!==a.TEXCLASS.OP&&r!==a.TEXCLASS.REL&&r!==a.TEXCLASS.OPEN&&r!==a.TEXCLASS.PUNCT)if(r!==a.TEXCLASS.BIN||e!==a.TEXCLASS.REL&&e!==a.TEXCLASS.CLOSE&&e!==a.TEXCLASS.PUNCT){if(e===a.TEXCLASS.BIN){for(var n=this,i=this.parent;i&&i.parent&&i.isEmbellished&&(1===i.childNodes.length||!i.isKind("mrow")&&i.core()===n);)i=(n=i).parent;i.childNodes[i.childNodes.length-1]===n&&(this.texClass=a.TEXCLASS.ORD)}}else t.texClass=this.prevClass=a.TEXCLASS.ORD;else this.texClass=a.TEXCLASS.ORD;return this},l.prototype.setInheritedAttributes=function(t,e,r,n){var i,o;void 0===t&&(t={}),void 0===e&&(e=!1),void 0===r&&(r=0),void 0===n&&(n=!1),M.prototype.setInheritedAttributes.call(this,t,e,r,n);var a=this.getText(),s=b(this.handleExplicitForm(this.getForms()),3),c=s[0],l=s[1],u=s[2];this.attributes.setInherited("form",c);var h=this.constructor.OPTABLE,f=h[c][a]||h[l][a]||h[u][a];if(f){void 0===this.getProperty("texClass")&&(this.texClass=f[2]);try{for(var p=g(Object.keys(f[3]||{})),d=p.next();!d.done;d=p.next()){var m=d.value;this.attributes.setInherited(m,f[3][m])}}catch(t){i={error:t}}finally{try{d&&!d.done&&(o=p.return)&&o.call(p)}finally{if(i)throw i.error}}this.lspace=(f[0]+1)/18,this.rspace=(f[1]+1)/18}else{var y=this.getRange(a);if(y){void 0===this.getProperty("texClass")&&(this.texClass=y[2]);var v=this.constructor.MMLSPACING[y[2]];this.lspace=(v[0]+1)/18,this.rspace=(v[1]+1)/18}}},l.prototype.getForms=function(){for(var t=this,e=this.parent,r=this.Parent;r&&r.isEmbellished;)t=e,e=r.parent,r=r.Parent;if(e&&e.isKind("mrow")&&1!==e.nonSpaceLength()){if(e.firstNonSpace()===t)return["prefix","infix","postfix"];if(e.lastNonSpace()===t)return["postfix","infix","prefix"]}return["infix","prefix","postfix"]},l.prototype.handleExplicitForm=function(t){if(this.attributes.isSet("form")){var e=this.attributes.get("form");t=[e].concat(t.filter(function(t){return t!==e}))}return t},l.prototype.getRange=function(t){var e,r;if(!t.match(/^[\uD800-\uDBFF]?.$/))return null;var n=t.charCodeAt(0);2===t.length&&(n=1024*(n-55296)+t.charCodeAt(1)-56320+65536);var i=this.constructor.RANGES;try{for(var o=g(i),a=o.next();!a.done;a=o.next()){var s=a.value;if(s[0]<=n&&n<=s[1])return s;if(n=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var o=r(21),s=function(t,e){void 0===t&&(t="???"),void 0===e&&(e=""),this.tag=t,this.id=e};e.Label=s;var c=function(t,e,r,n,i,o,a,s){void 0===t&&(t=""),void 0===e&&(e=!1),void 0===r&&(r=!1),void 0===n&&(n=null),void 0===i&&(i=""),void 0===o&&(o=""),void 0===a&&(a=!1),void 0===s&&(s=""),this.env=t,this.taggable=e,this.defaultTags=r,this.tag=n,this.tagId=i,this.tagFormat=o,this.noTag=a,this.labelId=s};e.TagInfo=c;var l=(u.prototype.start=function(t,e,r){this.currentTag&&this.stack.push(this.currentTag),this.currentTag=new c(t,e,r)},Object.defineProperty(u.prototype,"env",{get:function(){return 
this.currentTag.env},enumerable:!0,configurable:!0}),u.prototype.end=function(){this.history.push(this.currentTag),this.currentTag=this.stack.pop()},u.prototype.tag=function(t,e){this.currentTag.tag=t,this.currentTag.tagFormat=e?t:this.formatTag(t),this.currentTag.noTag=!1},u.prototype.notag=function(){this.tag("",!0),this.currentTag.noTag=!0},Object.defineProperty(u.prototype,"noTag",{get:function(){return this.currentTag.noTag},enumerable:!0,configurable:!0}),Object.defineProperty(u.prototype,"label",{get:function(){return this.currentTag.labelId},set:function(t){this.currentTag.labelId=t},enumerable:!0,configurable:!0}),u.prototype.formatUrl=function(t,e){return e+"#"+encodeURIComponent(t)},u.prototype.formatTag=function(t){return"("+t+")"},u.prototype.formatId=function(t){return"mjx-eqn-"+t.replace(/\s/g,"_")},u.prototype.formatNumber=function(t){return t.toString()},u.prototype.autoTag=function(){null==this.currentTag.tag&&(this.counter++,this.tag(this.formatNumber(this.counter),!1))},u.prototype.clearTag=function(){this.label="",this.tag(null,!0),this.currentTag.tagId=""},u.prototype.getTag=function(t){if(void 0===t&&(t=!1),t)return this.autoTag(),this.makeTag();var e=this.currentTag;return e.taggable&&!e.noTag&&(e.defaultTags&&this.autoTag(),e.tag)?this.makeTag():null},u.prototype.resetTag=function(){this.history=[],this.redo=!1,this.refUpdate=!1,this.clearTag()},u.prototype.reset=function(t){void 0===t&&(t=0),this.resetTag(),this.counter=this.allCounter=t,this.allLabels={},this.allIds={}},u.prototype.startEquation=function(t){this.labels={},this.ids={},this.counter=this.allCounter,this.redo=!1;var e=t.inputData.recompile;e&&(this.refUpdate=!0,this.counter=e.counter)},u.prototype.finishEquation=function(t){this.redo&&(t.inputData.recompile={state:t.state(),counter:this.allCounter}),this.refUpdate||(this.allCounter=this.counter),Object.assign(this.allIds,this.ids),Object.assign(this.allLabels,this.labels)},u.prototype.finalize=function(t,e){if(!e.display||this.currentTag.env||null==this.currentTag.tag)return t;var r=this.makeTag();return this.enTag(t,r)},u.prototype.makeId=function(){this.currentTag.tagId=this.formatId(this.configuration.options.useLabelIds&&this.label||this.currentTag.tag)},u.prototype.makeTag=function(){this.makeId(),this.label&&(this.labels[this.label]=new s(this.currentTag.tag,this.currentTag.tagId));var t=new o.default("\\text{"+this.currentTag.tagFormat+"}",{},this.configuration).mml();return this.configuration.nodeFactory.create("node","mtd",[t],{id:this.currentTag.tagId})},u);function u(){this.counter=0,this.allCounter=0,this.configuration=null,this.ids={},this.allIds={},this.labels={},this.allLabels={},this.redo=!1,this.refUpdate=!1,this.currentTag=new c,this.history=[],this.stack=[],this.enTag=function(t,e){var r=this.configuration.nodeFactory,n=r.create("node","mtd",[t]),i=r.create("node","mlabeledtr",[e,n]);return r.create("node","mtable",[i],{side:this.configuration.options.tagSide,minlabelspacing:this.configuration.options.tagIndent,displaystyle:!0})}}e.AbstractTags=l;var h,f=(i(p,h=l),p.prototype.autoTag=function(){},p.prototype.getTag=function(){return this.currentTag.tag?h.prototype.getTag.call(this):null},p);function p(){return null!==h&&h.apply(this,arguments)||this}e.NoTags=f;var d,m,y,v,b=(i(g,d=l),g.prototype.finalize=function(t,e){if(!e.display||this.history.find(function(t){return t.taggable}))return t;var r=this.getTag(!0);return this.enTag(t,r)},g);function g(){return 
null!==d&&d.apply(this,arguments)||this}e.AllTags=b,m=e.TagsFactory||(e.TagsFactory={}),y=new Map([["none",f],["all",b]]),v="none",m.OPTIONS={tags:v,tagSide:"right",tagIndent:"0.8em",multlineWidth:"85%",useLabelIds:!0,ignoreDuplicateLabels:!1},m.add=function(t,e){y.set(t,e)},m.addTags=function(t){var e,r;try{for(var n=a(Object.keys(t)),i=n.next();!i.done;i=n.next()){var o=i.value;m.add(o,t[o])}}catch(t){e={error:t}}finally{try{i&&!i.done&&(r=n.return)&&r.call(n)}finally{if(e)throw e.error}}},m.create=function(t){return new(y.get(t)||this.defaultTags)},m.setDefault=function(t){v=t},m.getDefault=function(){return m.create(v)}},function($K,_K){var aL;aL=function(){return this}();try{aL=aL||Function("return this")()||eval("this")}catch(t){"object"==typeof window&&(aL=window)}$K.exports=aL},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(3),i=r(25),o=(Object.defineProperty(a.prototype,"name",{get:function(){return this.constructor.NAME},enumerable:!0,configurable:!0}),a.prototype.setAdaptor=function(t){this.adaptor=t},a.prototype.setMmlFactory=function(t){this.mmlFactory=t},a.prototype.initialize=function(){},Object.defineProperty(a.prototype,"processStrings",{get:function(){return!0},enumerable:!0,configurable:!0}),a.prototype.findMath=function(t,e){return[]},a.prototype.executeFilters=function(t,e,r,n){var i={math:e,document:r,data:n};return t.execute(i),i.data},a.NAME="generic",a.OPTIONS={},a);function a(t){void 0===t&&(t={}),this.adaptor=null,this.mmlFactory=null;var e=this.constructor;this.options=n.userOptions(n.defaultOptions({},e.OPTIONS),t),this.preFilters=new i.FunctionList,this.postFilters=new i.FunctionList}e.AbstractInputJax=o},function(t,e,r){"use strict";var a=this&&this.__values||function(t){var e="function"==typeof Symbol&&Symbol.iterator,r=e&&t[e],n=0;if(r)return r.call(t);if(t&&"number"==typeof t.length)return{next:function(){return t&&n>=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")},n=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var c=r(4),l=(Object.defineProperty(u.prototype,"nodes",{get:function(){return this._nodes},enumerable:!0,configurable:!0}),u.prototype.Push=function(){for(var t,e=[],r=0;rt.configuration.options.maxMacros)throw new d.default("MaxMacroSub2","MathJax maximum substitution count exceeded; is there a recursive latex environment?");t.parse("environment",[t,r])},i.Array=function(t,e,r,n,i,o,a,s,c){var l=("c"+(i=i||t.GetArgument("\\begin{"+e.getName()+"}"))).replace(/[^clr|:]/g,"").replace(/[^|:]([|:])+/g,"$1");i=(i=i.replace(/[^clr]/g,"").split("").join(" ")).replace(/l/g,"left").replace(/r/g,"right").replace(/c/g,"center");var u=t.itemFactory.create("array");return u.arraydef={columnalign:i,columnspacing:o||"1em",rowspacing:a||"4pt"},l.match(/[|:]/)&&(l.charAt(0).match(/[|:]/)&&(u.frame.push("left"),u.dashed=":"===l.charAt(0)),l.charAt(l.length-1).match(/[|:]/)&&u.frame.push("right"),l=l.substr(1,l.length-2),u.arraydef.columnlines=l.split("").join(" ").replace(/[^|: 
]/g,"none").replace(/\|/g,"solid").replace(/:/g,"dashed")),r&&u.setProperty("open",t.convertDelimiter(r)),n&&u.setProperty("close",t.convertDelimiter(n)),"D"===s?u.arraydef.displaystyle=!0:s&&(u.arraydef.displaystyle=!1),"S"===s&&(u.arraydef.scriptlevel=1),c&&(u.arraydef.useHeight=!1),t.Push(e),u},i.AlignedArray=function(t,e){var r=t.GetBrackets("\\begin{"+e.getName()+"}"),n=i.Array(t,e);return y.default.setArrayAlign(n,r)},i.Equation=function(t,e,r){return t.Push(e),y.default.checkEqnEnv(t),t.itemFactory.create("equation",r).setProperty("name",e.getName())},i.EqnArray=function(t,e,r,n,i,o){t.Push(e),n&&y.default.checkEqnEnv(t),i=(i=i.replace(/[^clr]/g,"").split("").join(" ")).replace(/l/g,"left").replace(/r/g,"right").replace(/c/g,"center");var a=t.itemFactory.create("eqnarray",e.getName(),r,n,t.stack.global);return a.arraydef={displaystyle:!0,columnalign:i,columnspacing:o||"1em",rowspacing:"3pt",side:t.options.tagSide,minlabelspacing:t.options.tagIndent},a},i.HandleNoTag=function(t,e){t.tags.notag()},i.HandleLabel=function(t,e){t.stack.global;var r=t.GetArgument(e);if(""!==r&&!t.tags.refUpdate){if(t.tags.label)throw new d.default("MultipleCommand","Multiple %1",t.currentCS);if(t.tags.label=r,(t.tags.allLabels[r]||t.tags.labels[r])&&!t.options.ignoreDuplicateLabels)throw new d.default("MultipleLabel","Label '%1' multiply defined",r);t.tags.labels[r]=new s.Label}},i.HandleRef=function(t,e,r){var n=t.GetArgument(e),i=t.tags.allLabels[n]||t.tags.labels[n];i||(t.tags.refUpdate||(t.tags.redo=!0),i=new s.Label);var o=i.tag;r&&(o=t.tags.formatTag(o));var a=t.create("node","mrow",y.default.internalMath(t,o),{href:t.tags.formatUrl(i.id,t.options.baseURL),class:"MathJax_ref"});t.Push(a)},i.Macro=function(t,e,r,n,i){if(n){var o=[];if(null!=i){var a=t.GetBrackets(e);o.push(null==a?i:a)}for(var s=o.length;st.configuration.options.maxMacros)throw new d.default("MaxMacroSub1","MathJax maximum macro substitution count exceeded; is there a recursive macro call?")},i.MathChoice=function(t,e){var r=t.ParseArg(e),n=t.ParseArg(e),i=t.ParseArg(e),o=t.ParseArg(e);t.Push(t.create("node","mathchoice",[r,n,i,o]))},e.default=i},function(t,p,e){"use strict";var d=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0Math.PI/2-r?t.thickness*h*Math.sin(u+r-Math.PI/2):0);return[f,p,f,p]},remove:e[3]}]}},p.CommonArrow=function(f){return function(t){var e=d(p.arrowDef[t],4),l=e[0],u=e[1],h=e[2],r=e[3];return[t+"arrow",{renderer:function(t,e){var r=t.getBBox(),n=r.w,i=r.h,o=r.d,a=d(h?[i+o,n]:[n,i+o],2),s=a[0],c=(a[1],t.arrow(s,l,u));f(t,c)},bbox:p.arrowBBox[t],remove:r}]}}},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),h=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0 *":{display:"block"}},g.useIC=!1,g);function g(){return null!==v&&v.apply(this,arguments)||this}e.CHTMLmsubsup=b},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in 
e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),f=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")},p=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0=e&&a.item.renderDoc(t))return}}catch(t){r={error:t}}finally{try{o&&!o.done&&(n=i.return)&&n.call(i)}finally{if(r)throw r.error}}},v.prototype.renderMath=function(t,e,r){var n,i;void 0===r&&(r=m.STATE.UNPROCESSED);try{for(var o=h(this.items),a=o.next();!a.done;a=o.next()){var s=a.value;if(s.priority>=r&&s.item.renderMath(t,e))return}}catch(t){n={error:t}}finally{try{a&&!a.done&&(i=o.return)&&i.call(o)}finally{if(n)throw n.error}}},v.prototype.renderConvert=function(t,e,r){var n,i;void 0===r&&(r=m.STATE.LAST);try{for(var o=h(this.items),a=o.next();!a.done;a=o.next()){var s=a.value;if(s.priority>=r)return;if(s.item.convert&&s.item.renderMath(t,e))return}}catch(t){n={error:t}}finally{try{a&&!a.done&&(i=o.return)&&i.call(o)}finally{if(n)throw n.error}}},v.prototype.findID=function(t){var e,r;try{for(var n=h(this.items),i=n.next();!i.done;i=n.next()){var o=i.value;if(o.item.id===t)return o.item}}catch(t){e={error:t}}finally{try{i&&!i.done&&(r=n.return)&&r.call(n)}finally{if(e)throw e.error}}return null},v);function v(){return null!==o&&o.apply(this,arguments)||this}e.RenderList=y;var b,g=(b=a.AbstractInputJax,i(M,b),M.prototype.compile=function(t){return null},M);function M(){return null!==b&&b.apply(this,arguments)||this}var O,x=(O=s.AbstractOutputJax,i(S,O),S.prototype.typeset=function(t,e){return void 0===e&&(e=null),null},S.prototype.escaped=function(t,e){return null},S);function S(){return null!==O&&O.apply(this,arguments)||this}var E,C=(E=c.AbstractMathList,i(_,E),_);function _(){return null!==E&&E.apply(this,arguments)||this}var T,w=(T=m.AbstractMathItem,i(A,T),A);function A(){return null!==T&&T.apply(this,arguments)||this}var k=(Object.defineProperty(I.prototype,"kind",{get:function(){return this.constructor.KIND},enumerable:!0,configurable:!0}),I.prototype.addRenderAction=function(t){for(var e=[],r=1;r=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var o=(Object.defineProperty(a.prototype,"factory",{get:function(){return this._factory},enumerable:!0,configurable:!0}),Object.defineProperty(a.prototype,"kind",{get:function(){return"unknown"},enumerable:!0,configurable:!0}),a.prototype.setProperty=function(t,e){this.properties[t]=e},a.prototype.getProperty=function(t){return this.properties[t]},a.prototype.getPropertyNames=function(){return Object.keys(this.properties)},a.prototype.getAllProperties=function(){return this.properties},a.prototype.removeProperty=function(){for(var e,t,r=[],n=0;n=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var 
a,u=r(0),s=(a=u.AbstractMmlNode,i(c,a),Object.defineProperty(c.prototype,"kind",{get:function(){return"mrow"},enumerable:!0,configurable:!0}),Object.defineProperty(c.prototype,"isSpacelike",{get:function(){var e,t;try{for(var r=l(this.childNodes),n=r.next();!n.done;n=r.next())if(!n.value.isSpacelike)return!1}catch(t){e={error:t}}finally{try{n&&!n.done&&(t=r.return)&&t.call(r)}finally{if(e)throw e.error}}return!0},enumerable:!0,configurable:!0}),Object.defineProperty(c.prototype,"isEmbellished",{get:function(){var e,t,r=!1,n=0;try{for(var i=l(this.childNodes),o=i.next();!o.done;o=i.next()){var a=o.value;if(a)if(a.isEmbellished){if(r)return!1;r=!0,this._core=n}else if(!a.isSpacelike)return!1;n++}}catch(t){e={error:t}}finally{try{o&&!o.done&&(t=i.return)&&t.call(i)}finally{if(e)throw e.error}}return r},enumerable:!0,configurable:!0}),c.prototype.core=function(){return this.isEmbellished&&null!=this._core?this.childNodes[this._core]:this},c.prototype.coreMO=function(){return this.isEmbellished&&null!=this._core?this.childNodes[this._core].coreMO():this},c.prototype.nonSpaceLength=function(){var e,t,r=0;try{for(var n=l(this.childNodes),i=n.next();!i.done;i=n.next()){var o=i.value;o&&!o.isSpacelike&&r++}}catch(t){e={error:t}}finally{try{i&&!i.done&&(t=n.return)&&t.call(n)}finally{if(e)throw e.error}}return r},c.prototype.firstNonSpace=function(){var e,t;try{for(var r=l(this.childNodes),n=r.next();!n.done;n=r.next()){var i=n.value;if(i&&!i.isSpacelike)return i}}catch(t){e={error:t}}finally{try{n&&!n.done&&(t=r.return)&&t.call(r)}finally{if(e)throw e.error}}return null},c.prototype.lastNonSpace=function(){for(var t=this.childNodes.length;0<=--t;){var e=this.childNodes[t];if(e&&!e.isSpacelike)return e}return null},c.prototype.setTeXclass=function(t){var e,r,n,i;if(null==this.getProperty("open")&&null==this.getProperty("close")||t&&null==t.getProperty("fnOp")){try{for(var o=l(this.childNodes),a=o.next();!a.done;a=o.next())t=a.value.setTeXclass(t)}catch(t){n={error:t}}finally{try{a&&!a.done&&(i=o.return)&&i.call(o)}finally{if(n)throw n.error}}this.childNodes[0]&&this.updateTeXclass(this.childNodes[0])}else{this.getPrevClass(t),t=null;try{for(var s=l(this.childNodes),c=s.next();!c.done;c=s.next())t=c.value.setTeXclass(t)}catch(t){e={error:t}}finally{try{c&&!c.done&&(r=s.return)&&r.call(s)}finally{if(e)throw e.error}}null==this.texClass&&(this.texClass=u.TEXCLASS.INNER)}return t},c.defaults=o({},u.AbstractMmlNode.defaults),c);function c(){var t=null!==a&&a.apply(this,arguments)||this;return t._core=null,t}e.MmlMrow=s;var h,f=(i(p,h=s),Object.defineProperty(p.prototype,"kind",{get:function(){return"inferredMrow"},enumerable:!0,configurable:!0}),Object.defineProperty(p.prototype,"isInferred",{get:function(){return!0},enumerable:!0,configurable:!0}),Object.defineProperty(p.prototype,"notParent",{get:function(){return!0},enumerable:!0,configurable:!0}),p.prototype.toString=function(){return"["+this.childNodes.join(",")+"]"},p.defaults=s.defaults,p);function p(){return null!==h&&h.apply(this,arguments)||this}e.MmlInferredMrow=f},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),o=this&&this.__assign||function(){return(o=Object.assign||function(t){for(var e,r=1,n=arguments.length;r=t.length&&(t=void 
0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var s,c=r(0),l=(s=c.AbstractMmlBaseNode,i(u,s),Object.defineProperty(u.prototype,"kind",{get:function(){return"mfrac"},enumerable:!0,configurable:!0}),Object.defineProperty(u.prototype,"arity",{get:function(){return 2},enumerable:!0,configurable:!0}),Object.defineProperty(u.prototype,"linebreakContainer",{get:function(){return!0},enumerable:!0,configurable:!0}),u.prototype.setTeXclass=function(t){var e,r;this.getPrevClass(t);try{for(var n=a(this.childNodes),i=n.next();!i.done;i=n.next())i.value.setTeXclass(null)}catch(t){e={error:t}}finally{try{i&&!i.done&&(r=n.return)&&r.call(n)}finally{if(e)throw e.error}}return this.isEmbellished&&this.updateTeXclass(this.core()),this},u.prototype.setChildInheritedAttributes=function(t,e,r,n){(!e||0=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var u,h=r(0),a=(u=h.AbstractMmlNode,i(s,u),Object.defineProperty(s.prototype,"kind",{get:function(){return"mfenced"},enumerable:!0,configurable:!0}),s.prototype.setTeXclass=function(t){this.getPrevClass(t),this.open&&(t=this.open.setTeXclass(t)),this.childNodes[0]&&(t=this.childNodes[0].setTeXclass(t));for(var e=1,r=this.childNodes.length;ethis.childNodes.length&&(t=1),this.attributes.set("selection",t)},l.defaults=o(o({},s.AbstractMmlNode.defaults),{actiontype:"toggle",selection:1}),l);function l(){return null!==a&&a.apply(this,arguments)||this}e.MmlMaction=c},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),o=this&&this.__assign||function(){return(o=Object.assign||function(t){for(var e,r=1,n=arguments.length;r=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var l,u=r(0),m=r(10),a=(l=u.AbstractMmlNode,i(s,l),Object.defineProperty(s.prototype,"kind",{get:function(){return"mtable"},enumerable:!0,configurable:!0}),Object.defineProperty(s.prototype,"linebreakContainer",{get:function(){return!0},enumerable:!0,configurable:!0}),s.prototype.setInheritedAttributes=function(t,e,r,n){var i,o;try{for(var a=d(u.indentAttributes),s=a.next();!s.done;s=a.next()){var c=s.value;t[c]&&this.attributes.setInherited(c,t[c][1]),void 0!==this.attributes.getExplicit(c)&&delete this.attributes.getAllAttributes()[c]}}catch(t){i={error:t}}finally{try{s&&!s.done&&(o=a.return)&&o.call(a)}finally{if(i)throw i.error}}l.prototype.setInheritedAttributes.call(this,t,e,r,n)},s.prototype.setChildInheritedAttributes=function(t,e,r,n){var i,o,a,s;try{for(var c=d(this.childNodes),l=c.next();!l.done;l=c.next())(p=l.value).isKind("mtr")||this.replaceChild(this.factory.create("mtr"),p).appendChild(p)}catch(t){i={error:t}}finally{try{l&&!l.done&&(o=c.return)&&o.call(c)}finally{if(i)throw i.error}}e=!(!this.attributes.getExplicit("displaystyle")&&!this.attributes.getDefault("displaystyle")),t=this.addInheritedAttributes(t,{columnalign:this.attributes.get("columnalign"),rowalign:"center"});var 
u=m.split(this.attributes.get("rowalign"));try{for(var h=d(this.childNodes),f=h.next();!f.done;f=h.next()){var p=f.value;t.rowalign[1]=u.shift()||t.rowalign[1],p.setInheritedAttributes(t,e,r,n)}}catch(t){a={error:t}}finally{try{f&&!f.done&&(s=h.return)&&s.call(h)}finally{if(a)throw a.error}}},s.prototype.verifyChildren=function(t){var e,r;if(!t.fixMtables)try{for(var n=d(this.childNodes),i=n.next();!i.done;i=n.next())i.value.isKind("mtr")||this.mError("Children of "+this.kind+" must be mtr or mlabeledtr",t)}catch(t){e={error:t}}finally{try{i&&!i.done&&(r=n.return)&&r.call(n)}finally{if(e)throw e.error}}l.prototype.verifyChildren.call(this,t)},s.prototype.setTeXclass=function(t){var e,r;this.getPrevClass(t);try{for(var n=d(this.childNodes),i=n.next();!i.done;i=n.next())i.value.setTeXclass(null)}catch(t){e={error:t}}finally{try{i&&!i.done&&(r=n.return)&&r.call(n)}finally{if(e)throw e.error}}return this},s.defaults=o(o({},u.AbstractMmlNode.defaults),{align:"axis",rowalign:"baseline",columnalign:"center",groupalign:"{left}",alignmentscope:!0,columnwidth:"auto",width:"auto",rowspacing:"1ex",columnspacing:".8em",rowlines:"none",columnlines:"none",frame:"none",framespacing:"0.4em 0.5ex",equalrows:!1,equalcolumns:!1,displaystyle:!1,side:"right",minlabelspacing:"0.8em"}),s);function s(){var t=null!==l&&l.apply(this,arguments)||this;return t.properties={useHeight:1},t.texClass=u.TEXCLASS.ORD,t}e.MmlMtable=a},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),o=this&&this.__assign||function(){return(o=Object.assign||function(t){for(var e,r=1,n=arguments.length;r=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var a,s=r(0),c=r(19),m=r(10),l=(a=s.AbstractMmlNode,i(u,a),Object.defineProperty(u.prototype,"kind",{get:function(){return"mtr"},enumerable:!0,configurable:!0}),Object.defineProperty(u.prototype,"linebreakContainer",{get:function(){return!0},enumerable:!0,configurable:!0}),u.prototype.setChildInheritedAttributes=function(t,e,r,n){var i,o,a,s;try{for(var c=d(this.childNodes),l=c.next();!l.done;l=c.next())(p=l.value).isKind("mtd")||this.replaceChild(this.factory.create("mtd"),p).appendChild(p)}catch(t){i={error:t}}finally{try{l&&!l.done&&(o=c.return)&&o.call(c)}finally{if(i)throw i.error}}var u=m.split(this.attributes.get("columnalign"));1===this.arity&&u.unshift(this.parent.attributes.get("side")),t=this.addInheritedAttributes(t,{rowalign:this.attributes.get("rowalign"),columnalign:"center"});try{for(var h=d(this.childNodes),f=h.next();!f.done;f=h.next()){var p=f.value;t.columnalign[1]=u.shift()||t.columnalign[1],p.setInheritedAttributes(t,e,r,n)}}catch(t){a={error:t}}finally{try{f&&!f.done&&(s=h.return)&&s.call(h)}finally{if(a)throw a.error}}},u.prototype.verifyChildren=function(t){var e,r;if(!this.parent||this.parent.isKind("mtable")){if(!t.fixMtables)try{for(var n=d(this.childNodes),i=n.next();!i.done;i=n.next()){var o=i.value;o.isKind("mtd")||this.replaceChild(this.factory.create("mtr"),o).mError("Children of "+this.kind+" must be mtd",t,!0)}}catch(t){e={error:t}}finally{try{i&&!i.done&&(r=n.return)&&r.call(n)}finally{if(e)throw 
e.error}}a.prototype.verifyChildren.call(this,t)}else this.mError(this.kind+" can only be a child of an mtable",t,!0)},u.prototype.setTeXclass=function(t){var e,r;this.getPrevClass(t);try{for(var n=d(this.childNodes),i=n.next();!i.done;i=n.next())i.value.setTeXclass(null)}catch(t){e={error:t}}finally{try{i&&!i.done&&(r=n.return)&&r.call(n)}finally{if(e)throw e.error}}return this},u.defaults=o(o({},s.AbstractMmlNode.defaults),{rowalign:c.INHERIT,columnalign:c.INHERIT,groupalign:c.INHERIT}),u);function u(){return null!==a&&a.apply(this,arguments)||this}e.MmlMtr=l;var h,f=(i(p,h=l),Object.defineProperty(p.prototype,"kind",{get:function(){return"mlabeledtr"},enumerable:!0,configurable:!0}),Object.defineProperty(p.prototype,"arity",{get:function(){return 1},enumerable:!0,configurable:!0}),p);function p(){return null!==h&&h.apply(this,arguments)||this}e.MmlMlabeledtr=f},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),o=this&&this.__assign||function(){return(o=Object.assign||function(t){for(var e,r=1,n=arguments.length;r=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")},v=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0t.configuration.options.maxMacros)throw new l.default("MaxMacroSub1","MathJax maximum macro substitution count exceeded; is here a recursive macro call?")},BeginEnv:function(t,e,r,n,i,o){if(e.getProperty("end")&&t.stack.env.closing===e.getName()){delete t.stack.env.closing;var a=t.string.slice(t.i);return t.string=n,t.i=0,t.Parse(),t.string=a,t.i=0,t.itemFactory.create("end").setProperty("name",e.getName())}if(i){var s=[];if(null!=o){var c=t.GetBrackets("\\begin{"+e.getName()+"}");s.push(null==c?o:c)}for(var l=s.length;l=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var l=r(16);e.CommonMrowMixin=function(t){return i(e,s=t),Object.defineProperty(e.prototype,"fixesPWidth",{get:function(){return!1},enumerable:!0,configurable:!0}),e.prototype.stretchChildren=function(){var e,t,r,n,i,o,a=[];try{for(var s=S(this.childNodes),c=s.next();!c.done;c=s.next())(x=c.value).canStretch(1)&&a.push(x)}catch(t){e={error:t}}finally{try{c&&!c.done&&(t=s.return)&&t.call(s)}finally{if(e)throw e.error}}var l=a.length,u=this.childNodes.length;if(l&&1 mjx-box":{"border-top":".07em solid"},"mjx-sqrt.mjx-tall > mjx-box":{"padding-left":".3em","margin-left":"-.3em"}},u);function u(){return null!==o&&o.apply(this,arguments)||this}e.CHTMLmsqrt=l},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),C=this&&this.__values||function(t){var e="function"==typeof Symbol&&Symbol.iterator,r=e&&t[e],n=0;if(r)return r.call(t);if(t&&"number"==typeof t.length)return{next:function(){return 
t&&n>=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0}),e.CommonMtrMixin=function(t){return i(e,r=t),Object.defineProperty(e.prototype,"fixesPWidth",{get:function(){return!1},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"numCells",{get:function(){return this.childNodes.length},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"labeled",{get:function(){return!1},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"tableCells",{get:function(){return this.childNodes},enumerable:!0,configurable:!0}),e.prototype.getChild=function(t){return this.childNodes[t]},e.prototype.getChildBBoxes=function(){return this.childNodes.map(function(t){return t.getBBox()})},e.prototype.stretchChildren=function(t){var e,r,n,i,o,a;void 0===t&&(t=null);var s=[],c=this.labeled?this.childNodes.slice(1):this.childNodes;try{for(var l=C(c),u=l.next();!u.done;u=l.next())(E=u.value.childNodes[0]).canStretch(1)&&s.push(E)}catch(t){e={error:t}}finally{try{u&&!u.done&&(r=l.return)&&r.call(l)}finally{if(e)throw e.error}}var h=s.length,f=this.childNodes.length;if(h&&1=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var o,a=r(80),s=(o=a.AbstractDOMAdaptor,i(l,o),l.prototype.parse=function(t,e){return void 0===e&&(e="text/html"),this.parser.parseFromString(t,e)},l.prototype.create=function(t,e){return e?this.document.createElementNS(e,t):this.document.createElement(t)},l.prototype.text=function(t){return this.document.createTextNode(t)},l.prototype.head=function(t){return t.head},l.prototype.body=function(t){return t.body},l.prototype.root=function(t){return t.documentElement},l.prototype.tags=function(t,e,r){void 0===r&&(r=null);var n=r?t.getElementsByTagNameNS(r,e):t.getElementsByTagName(e);return Array.from(n)},l.prototype.getElements=function(t,e){var r,n,i=[];try{for(var o=c(t),a=o.next();!a.done;a=o.next()){var s=a.value;"string"==typeof s?i=i.concat(Array.from(this.document.querySelectorAll(s))):Array.isArray(s)?i=i.concat(Array.from(s)):s instanceof this.window.NodeList||s instanceof this.window.HTMLCollection?i=i.concat(Array.from(s)):i.push(s)}}catch(t){r={error:t}}finally{try{a&&!a.done&&(n=o.return)&&n.call(o)}finally{if(r)throw r.error}}return i},l.prototype.parent=function(t){return t.parentNode},l.prototype.append=function(t,e){return t.appendChild(e)},l.prototype.insert=function(t,e){return this.parent(e).insertBefore(t,e)},l.prototype.remove=function(t){return this.parent(t).removeChild(t)},l.prototype.replace=function(t,e){return this.parent(e).replaceChild(t,e)},l.prototype.clone=function(t){return t.cloneNode(!0)},l.prototype.split=function(t,e){return t.splitText(e)},l.prototype.next=function(t){return t.nextSibling},l.prototype.previous=function(t){return t.previousSibling},l.prototype.firstChild=function(t){return t.firstChild},l.prototype.lastChild=function(t){return t.lastChild},l.prototype.childNodes=function(t){return Array.from(t.childNodes)},l.prototype.childNode=function(t,e){return t.childNodes[e]},l.prototype.kind=function(t){return t.nodeName.toLowerCase()},l.prototype.value=function(t){return t.nodeValue||""},l.prototype.textContent=function(t){return t.textContent},l.prototype.innerHTML=function(t){return t.innerHTML},l.prototype.outerHTML=function(t){return 
t.outerHTML},l.prototype.setAttribute=function(t,e,r,n){return void 0===n&&(n=null),n?t.setAttributeNS(n,e,r):t.setAttribute(e,r)},l.prototype.getAttribute=function(t,e){return t.getAttribute(e)},l.prototype.removeAttribute=function(t,e){return t.removeAttribute(e)},l.prototype.hasAttribute=function(t,e){return t.hasAttribute(e)},l.prototype.allAttributes=function(t){return Array.from(t.attributes).map(function(t){return{name:t.name,value:t.value}})},l.prototype.addClass=function(t,e){t.classList.add(e)},l.prototype.removeClass=function(t,e){return t.classList.remove(e)},l.prototype.hasClass=function(t,e){return t.classList.contains(e)},l.prototype.setStyle=function(t,e,r){t.style[e]=r},l.prototype.getStyle=function(t,e){return t.style[e]},l.prototype.allStyles=function(t){return t.style.cssText},l.prototype.fontSize=function(t){var e=this.window.getComputedStyle(t);return parseFloat(e.fontSize)},l.prototype.nodeSize=function(t,e,r){if(void 0===e&&(e=1),void 0===r&&(r=!1),r&&t.getBBox){var n=t.getBBox();return[n.width/e,n.height/e]}return[t.offsetWidth/e,t.offsetHeight/e]},l.prototype.nodeBBox=function(t){var e=t.getBoundingClientRect();return{left:e.left,right:e.right,top:e.top,bottom:e.bottom}},l);function l(t){var e=o.call(this,t.document)||this;return e.window=t,e.parser=new t.DOMParser,e}e.HTMLAdaptor=s},function(t,e,r){"use strict";var m=this&&this.__values||function(t){var e="function"==typeof Symbol&&Symbol.iterator,r=e&&t[e],n=0;if(r)return r.call(t);if(t&&"number"==typeof t.length)return{next:function(){return t&&n>=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var n=(i.prototype.node=function(t,e,r,n){var i,o;void 0===e&&(e={}),void 0===r&&(r=[]);var a=this.create(t,n);this.setAttributes(a,e);try{for(var s=m(r),c=s.next();!c.done;c=s.next()){var l=c.value;this.append(a,l)}}catch(t){i={error:t}}finally{try{c&&!c.done&&(o=s.return)&&o.call(s)}finally{if(i)throw i.error}}return a},i.prototype.setAttributes=function(t,e){var r,n,i,o,a,s;if(e.style&&"string"!=typeof e.style)try{for(var c=m(Object.keys(e.style)),l=c.next();!l.done;l=c.next()){var u=l.value;this.setStyle(t,u.replace(/-([a-z])/g,function(t,e){return e.toUpperCase()}),e.style[u])}}catch(t){r={error:t}}finally{try{l&&!l.done&&(n=c.return)&&n.call(c)}finally{if(r)throw r.error}}if(e.properties)try{for(var h=m(Object.keys(e.properties)),f=h.next();!f.done;f=h.next())t[u=f.value]=e.properties[u]}catch(t){i={error:t}}finally{try{f&&!f.done&&(o=h.return)&&o.call(h)}finally{if(i)throw i.error}}try{for(var p=m(Object.keys(e)),d=p.next();!d.done;d=p.next())"style"===(u=d.value)&&"string"!=typeof e.style||"properties"===u||this.setAttribute(t,u,e[u])}catch(t){a={error:t}}finally{try{d&&!d.done&&(s=p.return)&&s.call(p)}finally{if(a)throw a.error}}},i.prototype.replace=function(t,e){return this.insert(t,e),this.remove(e),e},i.prototype.childNode=function(t,e){return this.childNodes(t)[e]},i.prototype.allClasses=function(t){var e=this.getAttribute(t,"class");return e?e.replace(/ +/g," ").replace(/^ /,"").replace(/ $/,"").split(/ /):[]},i);function i(t){void 0===t&&(t=null),this.document=t}e.AbstractDOMAdaptor=n},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(79);e.browserAdaptor=function(){return new n.HTMLAdaptor(window)}},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof 
Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)});Object.defineProperty(e,"__esModule",{value:!0});var o,a=r(41),s=(o=a.AbstractMathDocument,i(c,o),c);function c(){return null!==o&&o.apply(this,arguments)||this}var l=(Object.defineProperty(u.prototype,"name",{get:function(){return this.constructor.NAME},enumerable:!0,configurable:!0}),u.prototype.handlesDocument=function(t){return!1},u.prototype.create=function(t,e){return new this.documentClass(t,this.adaptor,e)},u.NAME="generic",u);function u(t,e){void 0===e&&(e=5),this.documentClass=s,this.adaptor=t,this.priority=e}e.AbstractHandler=l},function(t,e,r){"use strict";var l=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var h=Symbol(),c=function(t){void 0===t&&(t=null),this.next=null,this.prev=null,this.data=t};e.ListItem=c;var i=(f.prototype.toArray=function(){return Array.from(this)},f.prototype.isBefore=function(t,e){return t":e.MO.BIN5,".":[0,3,i.TEXCLASS.PUNCT,{separator:!0}],"/":e.MO.ORD11,"//":n(1,1),"/=":e.MO.BIN4,":":[1,2,i.TEXCLASS.REL,null],":=":e.MO.BIN4,";":[0,3,i.TEXCLASS.PUNCT,{linebreakstyle:"after",separator:!0}],"<":e.MO.REL,"<=":e.MO.BIN5,"<>":n(1,1),"=":e.MO.REL,"==":e.MO.BIN4,">":e.MO.REL,">=":e.MO.BIN5,"?":[1,1,i.TEXCLASS.CLOSE,null],"@":e.MO.ORD11,"\\":e.MO.ORD,"^":e.MO.ORD11,_:e.MO.ORD11,"|":[2,2,i.TEXCLASS.ORD,{fence:!0,stretchy:!0,symmetric:!0}],"||":[2,2,i.TEXCLASS.BIN,{fence:!0,stretchy:!0,symmetric:!0}],"|||":[2,2,i.TEXCLASS.ORD,{fence:!0,stretchy:!0,symmetric:!0}],"\xb1":e.MO.BIN4,"\xb7":e.MO.BIN4,"\xd7":e.MO.BIN4,"\xf7":e.MO.BIN4,"\u02b9":e.MO.ORD,"\u0300":e.MO.ACCENT,"\u0301":e.MO.ACCENT,"\u0303":e.MO.WIDEACCENT,"\u0304":e.MO.ACCENT,"\u0306":e.MO.ACCENT,"\u0307":e.MO.ACCENT,"\u0308":e.MO.ACCENT,"\u030c":e.MO.ACCENT,"\u0332":e.MO.WIDEACCENT,"\u0338":e.MO.REL4,"\u2015":[0,0,i.TEXCLASS.ORD,{stretchy:!0}],"\u2017":[0,0,i.TEXCLASS.ORD,{stretchy:!0}],"\u2020":e.MO.BIN3,"\u2021":e.MO.BIN3,"\u2022":e.MO.BIN4,"\u2026":e.MO.INNER,"\u2044":e.MO.TALLBIN,"\u2061":e.MO.ORD,"\u2062":e.MO.ORD,"\u2063":[0,0,i.TEXCLASS.ORD,{linebreakstyle:"after",separator:!0}],"\u2064":e.MO.ORD,"\u20d7":e.MO.ACCENT,"\u2111":e.MO.ORD,"\u2113":e.MO.ORD,"\u2118":e.MO.ORD,"\u211c":e.MO.ORD,"\u2190":e.MO.WIDEREL,"\u2191":e.MO.RELSTRETCH,"\u2192":e.MO.WIDEREL,"\u2193":e.MO.RELSTRETCH,"\u2194":e.MO.WIDEREL,"\u2195":e.MO.RELSTRETCH,"\u2196":e.MO.RELSTRETCH,"\u2197":e.MO.RELSTRETCH,"\u2198":e.MO.RELSTRETCH,"\u2199":e.MO.RELSTRETCH,"\u219a":e.MO.RELACCENT,"\u219b":e.MO.RELACCENT,"\u219c":e.MO.WIDEREL,"\u219d":e.MO.WIDEREL,"\u219e":e.MO.WIDEREL,"\u219f":e.MO.WIDEREL,"\u21a0":e.MO.WIDEREL,"\u21a1":e.MO.RELSTRETCH,"\u21a2":e.MO.WIDEREL,"\u21a3":e.MO.WIDEREL,"\u21a4":e.MO.WIDEREL,"\u21a5":e.MO.RELSTRETCH,"\u21a6":e.MO.WIDEREL,"\u21a7":e.MO.RELSTRETCH,"\u21a8":e.MO.RELSTRETCH,"\u21a9":e.MO.WIDEREL,"\u21aa":e.MO.WIDEREL,"\u21ab":e.MO.WIDEREL,"\u21ac":e.MO.WIDEREL,"\u21ad":e.MO.WIDEREL,"\u21ae":e.MO.RELACCENT,"\u21af":e.MO.RELSTRETCH,"\u21b0":e.MO.RELSTRETCH,"\u21b1":e.MO.RELSTRETCH,"\u21b2":e.MO.RELSTRETCH,"\u21b3":e.MO.RELSTRETCH,"\u21b4":e.MO.RELSTRETCH,"\u21b5":e.MO.RELSTRETCH,"\u21b6":e.MO.RELACCEN
T,"\u21b7":e.MO.RELACCENT,"\u21b8":e.MO.REL,"\u21b9":e.MO.WIDEREL,"\u21ba":e.MO.REL,"\u21bb":e.MO.REL,"\u21bc":e.MO.WIDEREL,"\u21bd":e.MO.WIDEREL,"\u21be":e.MO.RELSTRETCH,"\u21bf":e.MO.RELSTRETCH,"\u21c0":e.MO.WIDEREL,"\u21c1":e.MO.WIDEREL,"\u21c2":e.MO.RELSTRETCH,"\u21c3":e.MO.RELSTRETCH,"\u21c4":e.MO.WIDEREL,"\u21c5":e.MO.RELSTRETCH,"\u21c6":e.MO.WIDEREL,"\u21c7":e.MO.WIDEREL,"\u21c8":e.MO.RELSTRETCH,"\u21c9":e.MO.WIDEREL,"\u21ca":e.MO.RELSTRETCH,"\u21cb":e.MO.WIDEREL,"\u21cc":e.MO.WIDEREL,"\u21cd":e.MO.RELACCENT,"\u21ce":e.MO.RELACCENT,"\u21cf":e.MO.RELACCENT,"\u21d0":e.MO.WIDEREL,"\u21d1":e.MO.RELSTRETCH,"\u21d2":e.MO.WIDEREL,"\u21d3":e.MO.RELSTRETCH,"\u21d4":e.MO.WIDEREL,"\u21d5":e.MO.RELSTRETCH,"\u21d6":e.MO.RELSTRETCH,"\u21d7":e.MO.RELSTRETCH,"\u21d8":e.MO.RELSTRETCH,"\u21d9":e.MO.RELSTRETCH,"\u21da":e.MO.WIDEREL,"\u21db":e.MO.WIDEREL,"\u21dc":e.MO.WIDEREL,"\u21dd":e.MO.WIDEREL,"\u21de":e.MO.REL,"\u21df":e.MO.REL,"\u21e0":e.MO.WIDEREL,"\u21e1":e.MO.RELSTRETCH,"\u21e2":e.MO.WIDEREL,"\u21e3":e.MO.RELSTRETCH,"\u21e4":e.MO.WIDEREL,"\u21e5":e.MO.WIDEREL,"\u21e6":e.MO.WIDEREL,"\u21e7":e.MO.RELSTRETCH,"\u21e8":e.MO.WIDEREL,"\u21e9":e.MO.RELSTRETCH,"\u21ea":e.MO.RELSTRETCH,"\u21eb":e.MO.RELSTRETCH,"\u21ec":e.MO.RELSTRETCH,"\u21ed":e.MO.RELSTRETCH,"\u21ee":e.MO.RELSTRETCH,"\u21ef":e.MO.RELSTRETCH,"\u21f0":e.MO.WIDEREL,"\u21f1":e.MO.REL,"\u21f2":e.MO.REL,"\u21f3":e.MO.RELSTRETCH,"\u21f4":e.MO.RELACCENT,"\u21f5":e.MO.RELSTRETCH,"\u21f6":e.MO.WIDEREL,"\u21f7":e.MO.RELACCENT,"\u21f8":e.MO.RELACCENT,"\u21f9":e.MO.RELACCENT,"\u21fa":e.MO.RELACCENT,"\u21fb":e.MO.RELACCENT,"\u21fc":e.MO.RELACCENT,"\u21fd":e.MO.WIDEREL,"\u21fe":e.MO.WIDEREL,"\u21ff":e.MO.WIDEREL,"\u2201":n(1,2,i.TEXCLASS.ORD),"\u2205":e.MO.ORD,"\u2206":e.MO.BIN3,"\u2208":e.MO.REL,"\u2209":e.MO.REL,"\u220a":e.MO.REL,"\u220b":e.MO.REL,"\u220c":e.MO.REL,"\u220d":e.MO.REL,"\u220e":e.MO.BIN3,"\u2212":e.MO.BIN4,"\u2213":e.MO.BIN4,"\u2214":e.MO.BIN4,"\u2215":e.MO.TALLBIN,"\u2216":e.MO.BIN4,"\u2217":e.MO.BIN4,"\u2218":e.MO.BIN4,"\u2219":e.MO.BIN4,"\u221d":e.MO.REL,"\u221e":e.MO.ORD,"\u221f":e.MO.REL,"\u2223":e.MO.REL,"\u2224":e.MO.REL,"\u2225":e.MO.REL,"\u2226":e.MO.REL,"\u2227":e.MO.BIN4,"\u2228":e.MO.BIN4,"\u2229":e.MO.BIN4,"\u222a":e.MO.BIN4,"\u2234":e.MO.REL,"\u2235":e.MO.REL,"\u2236":e.MO.REL,"\u2237":e.MO.REL,"\u2238":e.MO.BIN4,"\u2239":e.MO.REL,"\u223a":e.MO.BIN4,"\u223b":e.MO.REL,"\u223c":e.MO.REL,"\u223d":e.MO.REL,"\u223d\u0331":e.MO.BIN3,"\u223e":e.MO.REL,"\u223f":e.MO.BIN3,"\u2240":e.MO.BIN4,"\u2241":e.MO.REL,"\u2242":e.MO.REL,"\u2242\u0338":e.MO.REL,"\u2243":e.MO.REL,"\u2244":e.MO.REL,"\u2245":e.MO.REL,"\u2246":e.MO.REL,"\u2247":e.MO.REL,"\u2248":e.MO.REL,"\u2249":e.MO.REL,"\u224a":e.MO.REL,"\u224b":e.MO.REL,"\u224c":e.MO.REL,"\u224d":e.MO.REL,"\u224e":e.MO.REL,"\u224e\u0338":e.MO.REL,"\u224f":e.MO.REL,"\u224f\u0338":e.MO.REL,"\u2250":e.MO.REL,"\u2251":e.MO.REL,"\u2252":e.MO.REL,"\u2253":e.MO.REL,"\u2254":e.MO.REL,"\u2255":e.MO.REL,"\u2256":e.MO.REL,"\u2257":e.MO.REL,"\u2258":e.MO.REL,"\u2259":e.MO.REL,"\u225a":e.MO.REL,"\u225c":e.MO.REL,"\u225d":e.MO.REL,"\u225e":e.MO.REL,"\u225f":e.MO.REL,"\u2260":e.MO.REL,"\u2261":e.MO.REL,"\u2262":e.MO.REL,"\u2263":e.MO.REL,"\u2264":e.MO.REL,"\u2265":e.MO.REL,"\u2266":e.MO.REL,"\u2266\u0338":e.MO.REL,"\u2267":e.MO.REL,"\u2268":e.MO.REL,"\u2269":e.MO.REL,"\u226a":e.MO.REL,"\u226a\u0338":e.MO.REL,"\u226b":e.MO.REL,"\u226b\u0338":e.MO.REL,"\u226c":e.MO.REL,"\u226d":e.MO.REL,"\u226e":e.MO.REL,"\u226f":e.MO.REL,"\u2270":e.MO.REL,"\u2271":e.MO.REL,"\u2272":e.MO.REL,"\u2273":e.MO.REL,"\u2274":e.MO.RE
L,"\u2275":e.MO.REL,"\u2276":e.MO.REL,"\u2277":e.MO.REL,"\u2278":e.MO.REL,"\u2279":e.MO.REL,"\u227a":e.MO.REL,"\u227b":e.MO.REL,"\u227c":e.MO.REL,"\u227d":e.MO.REL,"\u227e":e.MO.REL,"\u227f":e.MO.REL,"\u227f\u0338":e.MO.REL,"\u2280":e.MO.REL,"\u2281":e.MO.REL,"\u2282":e.MO.REL,"\u2282\u20d2":e.MO.REL,"\u2283":e.MO.REL,"\u2283\u20d2":e.MO.REL,"\u2284":e.MO.REL,"\u2285":e.MO.REL,"\u2286":e.MO.REL,"\u2287":e.MO.REL,"\u2288":e.MO.REL,"\u2289":e.MO.REL,"\u228a":e.MO.REL,"\u228b":e.MO.REL,"\u228c":e.MO.BIN4,"\u228d":e.MO.BIN4,"\u228e":e.MO.BIN4,"\u228f":e.MO.REL,"\u228f\u0338":e.MO.REL,"\u2290":e.MO.REL,"\u2290\u0338":e.MO.REL,"\u2291":e.MO.REL,"\u2292":e.MO.REL,"\u2293":e.MO.BIN4,"\u2294":e.MO.BIN4,"\u2295":e.MO.BIN4,"\u2296":e.MO.BIN4,"\u2297":e.MO.BIN4,"\u2298":e.MO.BIN4,"\u2299":e.MO.BIN4,"\u229a":e.MO.BIN4,"\u229b":e.MO.BIN4,"\u229c":e.MO.BIN4,"\u229d":e.MO.BIN4,"\u229e":e.MO.BIN4,"\u229f":e.MO.BIN4,"\u22a0":e.MO.BIN4,"\u22a1":e.MO.BIN4,"\u22a2":e.MO.REL,"\u22a3":e.MO.REL,"\u22a4":e.MO.ORD55,"\u22a5":e.MO.REL,"\u22a6":e.MO.REL,"\u22a7":e.MO.REL,"\u22a8":e.MO.REL,"\u22a9":e.MO.REL,"\u22aa":e.MO.REL,"\u22ab":e.MO.REL,"\u22ac":e.MO.REL,"\u22ad":e.MO.REL,"\u22ae":e.MO.REL,"\u22af":e.MO.REL,"\u22b0":e.MO.REL,"\u22b1":e.MO.REL,"\u22b2":e.MO.REL,"\u22b3":e.MO.REL,"\u22b4":e.MO.REL,"\u22b5":e.MO.REL,"\u22b6":e.MO.REL,"\u22b7":e.MO.REL,"\u22b8":e.MO.REL,"\u22b9":e.MO.REL,"\u22ba":e.MO.BIN4,"\u22bb":e.MO.BIN4,"\u22bc":e.MO.BIN4,"\u22bd":e.MO.BIN4,"\u22be":e.MO.BIN3,"\u22bf":e.MO.BIN3,"\u22c4":e.MO.BIN4,"\u22c5":e.MO.BIN4,"\u22c6":e.MO.BIN4,"\u22c7":e.MO.BIN4,"\u22c8":e.MO.REL,"\u22c9":e.MO.BIN4,"\u22ca":e.MO.BIN4,"\u22cb":e.MO.BIN4,"\u22cc":e.MO.BIN4,"\u22cd":e.MO.REL,"\u22ce":e.MO.BIN4,"\u22cf":e.MO.BIN4,"\u22d0":e.MO.REL,"\u22d1":e.MO.REL,"\u22d2":e.MO.BIN4,"\u22d3":e.MO.BIN4,"\u22d4":e.MO.REL,"\u22d5":e.MO.REL,"\u22d6":e.MO.REL,"\u22d7":e.MO.REL,"\u22d8":e.MO.REL,"\u22d9":e.MO.REL,"\u22da":e.MO.REL,"\u22db":e.MO.REL,"\u22dc":e.MO.REL,"\u22dd":e.MO.REL,"\u22de":e.MO.REL,"\u22df":e.MO.REL,"\u22e0":e.MO.REL,"\u22e1":e.MO.REL,"\u22e2":e.MO.REL,"\u22e3":e.MO.REL,"\u22e4":e.MO.REL,"\u22e5":e.MO.REL,"\u22e6":e.MO.REL,"\u22e7":e.MO.REL,"\u22e8":e.MO.REL,"\u22e9":e.MO.REL,"\u22ea":e.MO.REL,"\u22eb":e.MO.REL,"\u22ec":e.MO.REL,"\u22ed":e.MO.REL,"\u22ee":e.MO.ORD55,"\u22ef":e.MO.INNER,"\u22f0":e.MO.REL,"\u22f1":[5,5,i.TEXCLASS.INNER,null],"\u22f2":e.MO.REL,"\u22f3":e.MO.REL,"\u22f4":e.MO.REL,"\u22f5":e.MO.REL,"\u22f6":e.MO.REL,"\u22f7":e.MO.REL,"\u22f8":e.MO.REL,"\u22f9":e.MO.REL,"\u22fa":e.MO.REL,"\u22fb":e.MO.REL,"\u22fc":e.MO.REL,"\u22fd":e.MO.REL,"\u22fe":e.MO.REL,"\u22ff":e.MO.REL,"\u2305":e.MO.BIN3,"\u2306":e.MO.BIN3,"\u2322":e.MO.REL4,"\u2323":e.MO.REL4,"\u2329":e.MO.OPEN,"\u232a":e.MO.CLOSE,"\u23aa":e.MO.ORD,"\u23af":[0,0,i.TEXCLASS.ORD,{stretchy:!0}],"\u23b0":e.MO.OPEN,"\u23b1":e.MO.CLOSE,"\u2500":e.MO.ORD,"\u25b3":e.MO.BIN4,"\u25b5":e.MO.BIN4,"\u25b9":e.MO.BIN4,"\u25bd":e.MO.BIN4,"\u25bf":e.MO.BIN4,"\u25c3":e.MO.BIN4,"\u25ef":e.MO.BIN3,"\u2660":e.MO.ORD,"\u2661":e.MO.ORD,"\u2662":e.MO.ORD,"\u2663":e.MO.ORD,"\u2758":e.MO.REL,"\u27f0":e.MO.RELSTRETCH,"\u27f1":e.MO.RELSTRETCH,"\u27f5":e.MO.WIDEREL,"\u27f6":e.MO.WIDEREL,"\u27f7":e.MO.WIDEREL,"\u27f8":e.MO.WIDEREL,"\u27f9":e.MO.WIDEREL,"\u27fa":e.MO.WIDEREL,"\u27fb":e.MO.WIDEREL,"\u27fc":e.MO.WIDEREL,"\u27fd":e.MO.WIDEREL,"\u27fe":e.MO.WIDEREL,"\u27ff":e.MO.WIDEREL,"\u2900":e.MO.RELACCENT,"\u2901":e.MO.RELACCENT,"\u2902":e.MO.RELACCENT,"\u2903":e.MO.RELACCENT,"\u2904":e.MO.RELACCENT,"\u2905":e.MO.RELACCENT,"\u2906":e.MO.RELACCENT,"\u2907":e.MO.RELACCENT,"
\u2908":e.MO.REL,"\u2909":e.MO.REL,"\u290a":e.MO.RELSTRETCH,"\u290b":e.MO.RELSTRETCH,"\u290c":e.MO.WIDEREL,"\u290d":e.MO.WIDEREL,"\u290e":e.MO.WIDEREL,"\u290f":e.MO.WIDEREL,"\u2910":e.MO.WIDEREL,"\u2911":e.MO.RELACCENT,"\u2912":e.MO.RELSTRETCH,"\u2913":e.MO.RELSTRETCH,"\u2914":e.MO.RELACCENT,"\u2915":e.MO.RELACCENT,"\u2916":e.MO.RELACCENT,"\u2917":e.MO.RELACCENT,"\u2918":e.MO.RELACCENT,"\u2919":e.MO.RELACCENT,"\u291a":e.MO.RELACCENT,"\u291b":e.MO.RELACCENT,"\u291c":e.MO.RELACCENT,"\u291d":e.MO.RELACCENT,"\u291e":e.MO.RELACCENT,"\u291f":e.MO.RELACCENT,"\u2920":e.MO.RELACCENT,"\u2921":e.MO.RELSTRETCH,"\u2922":e.MO.RELSTRETCH,"\u2923":e.MO.REL,"\u2924":e.MO.REL,"\u2925":e.MO.REL,"\u2926":e.MO.REL,"\u2927":e.MO.REL,"\u2928":e.MO.REL,"\u2929":e.MO.REL,"\u292a":e.MO.REL,"\u292b":e.MO.REL,"\u292c":e.MO.REL,"\u292d":e.MO.REL,"\u292e":e.MO.REL,"\u292f":e.MO.REL,"\u2930":e.MO.REL,"\u2931":e.MO.REL,"\u2932":e.MO.REL,"\u2933":e.MO.RELACCENT,"\u2934":e.MO.REL,"\u2935":e.MO.REL,"\u2936":e.MO.REL,"\u2937":e.MO.REL,"\u2938":e.MO.REL,"\u2939":e.MO.REL,"\u293a":e.MO.RELACCENT,"\u293b":e.MO.RELACCENT,"\u293c":e.MO.RELACCENT,"\u293d":e.MO.RELACCENT,"\u293e":e.MO.REL,"\u293f":e.MO.REL,"\u2940":e.MO.REL,"\u2941":e.MO.REL,"\u2942":e.MO.RELACCENT,"\u2943":e.MO.RELACCENT,"\u2944":e.MO.RELACCENT,"\u2945":e.MO.RELACCENT,"\u2946":e.MO.RELACCENT,"\u2947":e.MO.RELACCENT,"\u2948":e.MO.RELACCENT,"\u2949":e.MO.REL,"\u294a":e.MO.RELACCENT,"\u294b":e.MO.RELACCENT,"\u294c":e.MO.REL,"\u294d":e.MO.REL,"\u294e":e.MO.WIDEREL,"\u294f":e.MO.RELSTRETCH,"\u2950":e.MO.WIDEREL,"\u2951":e.MO.RELSTRETCH,"\u2952":e.MO.WIDEREL,"\u2953":e.MO.WIDEREL,"\u2954":e.MO.RELSTRETCH,"\u2955":e.MO.RELSTRETCH,"\u2956":e.MO.RELSTRETCH,"\u2957":e.MO.RELSTRETCH,"\u2958":e.MO.RELSTRETCH,"\u2959":e.MO.RELSTRETCH,"\u295a":e.MO.WIDEREL,"\u295b":e.MO.WIDEREL,"\u295c":e.MO.RELSTRETCH,"\u295d":e.MO.RELSTRETCH,"\u295e":e.MO.WIDEREL,"\u295f":e.MO.WIDEREL,"\u2960":e.MO.RELSTRETCH,"\u2961":e.MO.RELSTRETCH,"\u2962":e.MO.RELACCENT,"\u2963":e.MO.REL,"\u2964":e.MO.RELACCENT,"\u2965":e.MO.REL,"\u2966":e.MO.RELACCENT,"\u2967":e.MO.RELACCENT,"\u2968":e.MO.RELACCENT,"\u2969":e.MO.RELACCENT,"\u296a":e.MO.RELACCENT,"\u296b":e.MO.RELACCENT,"\u296c":e.MO.RELACCENT,"\u296d":e.MO.RELACCENT,"\u296e":e.MO.RELSTRETCH,"\u296f":e.MO.RELSTRETCH,"\u2970":e.MO.RELACCENT,"\u2971":e.MO.RELACCENT,"\u2972":e.MO.RELACCENT,"\u2973":e.MO.RELACCENT,"\u2974":e.MO.RELACCENT,"\u2975":e.MO.RELACCENT,"\u2976":e.MO.RELACCENT,"\u2977":e.MO.RELACCENT,"\u2978":e.MO.RELACCENT,"\u2979":e.MO.RELACCENT,"\u297a":e.MO.RELACCENT,"\u297b":e.MO.RELACCENT,"\u297c":e.MO.RELACCENT,"\u297d":e.MO.RELACCENT,"\u297e":e.MO.REL,"\u297f":e.MO.REL,"\u2981":e.MO.BIN3,"\u2982":e.MO.BIN3,"\u2999":e.MO.BIN3,"\u299a":e.MO.BIN3,"\u299b":e.MO.BIN3,"\u299c":e.MO.BIN3,"\u299d":e.MO.BIN3,"\u299e":e.MO.BIN3,"\u299f":e.MO.BIN3,"\u29a0":e.MO.BIN3,"\u29a1":e.MO.BIN3,"\u29a2":e.MO.BIN3,"\u29a3":e.MO.BIN3,"\u29a4":e.MO.BIN3,"\u29a5":e.MO.BIN3,"\u29a6":e.MO.BIN3,"\u29a7":e.MO.BIN3,"\u29a8":e.MO.BIN3,"\u29a9":e.MO.BIN3,"\u29aa":e.MO.BIN3,"\u29ab":e.MO.BIN3,"\u29ac":e.MO.BIN3,"\u29ad":e.MO.BIN3,"\u29ae":e.MO.BIN3,"\u29af":e.MO.BIN3,"\u29b0":e.MO.BIN3,"\u29b1":e.MO.BIN3,"\u29b2":e.MO.BIN3,"\u29b3":e.MO.BIN3,"\u29b4":e.MO.BIN3,"\u29b5":e.MO.BIN3,"\u29b6":e.MO.BIN4,"\u29b7":e.MO.BIN4,"\u29b8":e.MO.BIN4,"\u29b9":e.MO.BIN4,"\u29ba":e.MO.BIN4,"\u29bb":e.MO.BIN4,"\u29bc":e.MO.BIN4,"\u29bd":e.MO.BIN4,"\u29be":e.MO.BIN4,"\u29bf":e.MO.BIN4,"\u29c0":e.MO.REL,"\u29c1":e.MO.REL,"\u29c2":e.MO.BIN3,"\u29c3":e.MO.BIN3,"\u29c4":e.MO.BIN4,"\u29c5":e.MO.BIN4,"
\u29c6":e.MO.BIN4,"\u29c7":e.MO.BIN4,"\u29c8":e.MO.BIN4,"\u29c9":e.MO.BIN3,"\u29ca":e.MO.BIN3,"\u29cb":e.MO.BIN3,"\u29cc":e.MO.BIN3,"\u29cd":e.MO.BIN3,"\u29ce":e.MO.REL,"\u29cf":e.MO.REL,"\u29cf\u0338":e.MO.REL,"\u29d0":e.MO.REL,"\u29d0\u0338":e.MO.REL,"\u29d1":e.MO.REL,"\u29d2":e.MO.REL,"\u29d3":e.MO.REL,"\u29d4":e.MO.REL,"\u29d5":e.MO.REL,"\u29d6":e.MO.BIN4,"\u29d7":e.MO.BIN4,"\u29d8":e.MO.BIN3,"\u29d9":e.MO.BIN3,"\u29db":e.MO.BIN3,"\u29dc":e.MO.BIN3,"\u29dd":e.MO.BIN3,"\u29de":e.MO.REL,"\u29df":e.MO.BIN3,"\u29e0":e.MO.BIN3,"\u29e1":e.MO.REL,"\u29e2":e.MO.BIN4,"\u29e3":e.MO.REL,"\u29e4":e.MO.REL,"\u29e5":e.MO.REL,"\u29e6":e.MO.REL,"\u29e7":e.MO.BIN3,"\u29e8":e.MO.BIN3,"\u29e9":e.MO.BIN3,"\u29ea":e.MO.BIN3,"\u29eb":e.MO.BIN3,"\u29ec":e.MO.BIN3,"\u29ed":e.MO.BIN3,"\u29ee":e.MO.BIN3,"\u29ef":e.MO.BIN3,"\u29f0":e.MO.BIN3,"\u29f1":e.MO.BIN3,"\u29f2":e.MO.BIN3,"\u29f3":e.MO.BIN3,"\u29f4":e.MO.REL,"\u29f5":e.MO.BIN4,"\u29f6":e.MO.BIN4,"\u29f7":e.MO.BIN4,"\u29f8":e.MO.BIN3,"\u29f9":e.MO.BIN3,"\u29fa":e.MO.BIN3,"\u29fb":e.MO.BIN3,"\u29fe":e.MO.BIN4,"\u29ff":e.MO.BIN4,"\u2a1d":e.MO.BIN3,"\u2a1e":e.MO.BIN3,"\u2a1f":e.MO.BIN3,"\u2a20":e.MO.BIN3,"\u2a21":e.MO.BIN3,"\u2a22":e.MO.BIN4,"\u2a23":e.MO.BIN4,"\u2a24":e.MO.BIN4,"\u2a25":e.MO.BIN4,"\u2a26":e.MO.BIN4,"\u2a27":e.MO.BIN4,"\u2a28":e.MO.BIN4,"\u2a29":e.MO.BIN4,"\u2a2a":e.MO.BIN4,"\u2a2b":e.MO.BIN4,"\u2a2c":e.MO.BIN4,"\u2a2d":e.MO.BIN4,"\u2a2e":e.MO.BIN4,"\u2a2f":e.MO.BIN4,"\u2a30":e.MO.BIN4,"\u2a31":e.MO.BIN4,"\u2a32":e.MO.BIN4,"\u2a33":e.MO.BIN4,"\u2a34":e.MO.BIN4,"\u2a35":e.MO.BIN4,"\u2a36":e.MO.BIN4,"\u2a37":e.MO.BIN4,"\u2a38":e.MO.BIN4,"\u2a39":e.MO.BIN4,"\u2a3a":e.MO.BIN4,"\u2a3b":e.MO.BIN4,"\u2a3c":e.MO.BIN4,"\u2a3d":e.MO.BIN4,"\u2a3e":e.MO.BIN4,"\u2a3f":e.MO.BIN4,"\u2a40":e.MO.BIN4,"\u2a41":e.MO.BIN4,"\u2a42":e.MO.BIN4,"\u2a43":e.MO.BIN4,"\u2a44":e.MO.BIN4,"\u2a45":e.MO.BIN4,"\u2a46":e.MO.BIN4,"\u2a47":e.MO.BIN4,"\u2a48":e.MO.BIN4,"\u2a49":e.MO.BIN4,"\u2a4a":e.MO.BIN4,"\u2a4b":e.MO.BIN4,"\u2a4c":e.MO.BIN4,"\u2a4d":e.MO.BIN4,"\u2a4e":e.MO.BIN4,"\u2a4f":e.MO.BIN4,"\u2a50":e.MO.BIN4,"\u2a51":e.MO.BIN4,"\u2a52":e.MO.BIN4,"\u2a53":e.MO.BIN4,"\u2a54":e.MO.BIN4,"\u2a55":e.MO.BIN4,"\u2a56":e.MO.BIN4,"\u2a57":e.MO.BIN4,"\u2a58":e.MO.BIN4,"\u2a59":e.MO.REL,"\u2a5a":e.MO.BIN4,"\u2a5b":e.MO.BIN4,"\u2a5c":e.MO.BIN4,"\u2a5d":e.MO.BIN4,"\u2a5e":e.MO.BIN4,"\u2a5f":e.MO.BIN4,"\u2a60":e.MO.BIN4,"\u2a61":e.MO.BIN4,"\u2a62":e.MO.BIN4,"\u2a63":e.MO.BIN4,"\u2a64":e.MO.BIN4,"\u2a65":e.MO.BIN4,"\u2a66":e.MO.REL,"\u2a67":e.MO.REL,"\u2a68":e.MO.REL,"\u2a69":e.MO.REL,"\u2a6a":e.MO.REL,"\u2a6b":e.MO.REL,"\u2a6c":e.MO.REL,"\u2a6d":e.MO.REL,"\u2a6e":e.MO.REL,"\u2a6f":e.MO.REL,"\u2a70":e.MO.REL,"\u2a71":e.MO.BIN4,"\u2a72":e.MO.BIN4,"\u2a73":e.MO.REL,"\u2a74":e.MO.REL,"\u2a75":e.MO.REL,"\u2a76":e.MO.REL,"\u2a77":e.MO.REL,"\u2a78":e.MO.REL,"\u2a79":e.MO.REL,"\u2a7a":e.MO.REL,"\u2a7b":e.MO.REL,"\u2a7c":e.MO.REL,"\u2a7d":e.MO.REL,"\u2a7d\u0338":e.MO.REL,"\u2a7e":e.MO.REL,"\u2a7e\u0338":e.MO.REL,"\u2a7f":e.MO.REL,"\u2a80":e.MO.REL,"\u2a81":e.MO.REL,"\u2a82":e.MO.REL,"\u2a83":e.MO.REL,"\u2a84":e.MO.REL,"\u2a85":e.MO.REL,"\u2a86":e.MO.REL,"\u2a87":e.MO.REL,"\u2a88":e.MO.REL,"\u2a89":e.MO.REL,"\u2a8a":e.MO.REL,"\u2a8b":e.MO.REL,"\u2a8c":e.MO.REL,"\u2a8d":e.MO.REL,"\u2a8e":e.MO.REL,"\u2a8f":e.MO.REL,"\u2a90":e.MO.REL,"\u2a91":e.MO.REL,"\u2a92":e.MO.REL,"\u2a93":e.MO.REL,"\u2a94":e.MO.REL,"\u2a95":e.MO.REL,"\u2a96":e.MO.REL,"\u2a97":e.MO.REL,"\u2a98":e.MO.REL,"\u2a99":e.MO.REL,"\u2a9a":e.MO.REL,"\u2a9b":e.MO.REL,"\u2a9c":e.MO.REL,"\u2a9d":e.MO.REL,"\u2a9e":e.MO.REL,"\u2a9f":e.MO.RE
L,"\u2aa0":e.MO.REL,"\u2aa1":e.MO.REL,"\u2aa1\u0338":e.MO.REL,"\u2aa2":e.MO.REL,"\u2aa2\u0338":e.MO.REL,"\u2aa3":e.MO.REL,"\u2aa4":e.MO.REL,"\u2aa5":e.MO.REL,"\u2aa6":e.MO.REL,"\u2aa7":e.MO.REL,"\u2aa8":e.MO.REL,"\u2aa9":e.MO.REL,"\u2aaa":e.MO.REL,"\u2aab":e.MO.REL,"\u2aac":e.MO.REL,"\u2aad":e.MO.REL,"\u2aae":e.MO.REL,"\u2aaf":e.MO.REL,"\u2aaf\u0338":e.MO.REL,"\u2ab0":e.MO.REL,"\u2ab0\u0338":e.MO.REL,"\u2ab1":e.MO.REL,"\u2ab2":e.MO.REL,"\u2ab3":e.MO.REL,"\u2ab4":e.MO.REL,"\u2ab5":e.MO.REL,"\u2ab6":e.MO.REL,"\u2ab7":e.MO.REL,"\u2ab8":e.MO.REL,"\u2ab9":e.MO.REL,"\u2aba":e.MO.REL,"\u2abb":e.MO.REL,"\u2abc":e.MO.REL,"\u2abd":e.MO.REL,"\u2abe":e.MO.REL,"\u2abf":e.MO.REL,"\u2ac0":e.MO.REL,"\u2ac1":e.MO.REL,"\u2ac2":e.MO.REL,"\u2ac3":e.MO.REL,"\u2ac4":e.MO.REL,"\u2ac5":e.MO.REL,"\u2ac6":e.MO.REL,"\u2ac7":e.MO.REL,"\u2ac8":e.MO.REL,"\u2ac9":e.MO.REL,"\u2aca":e.MO.REL,"\u2acb":e.MO.REL,"\u2acc":e.MO.REL,"\u2acd":e.MO.REL,"\u2ace":e.MO.REL,"\u2acf":e.MO.REL,"\u2ad0":e.MO.REL,"\u2ad1":e.MO.REL,"\u2ad2":e.MO.REL,"\u2ad3":e.MO.REL,"\u2ad4":e.MO.REL,"\u2ad5":e.MO.REL,"\u2ad6":e.MO.REL,"\u2ad7":e.MO.REL,"\u2ad8":e.MO.REL,"\u2ad9":e.MO.REL,"\u2ada":e.MO.REL,"\u2adb":e.MO.REL,"\u2adc":e.MO.REL,"\u2add":e.MO.REL,"\u2ade":e.MO.REL,"\u2adf":e.MO.REL,"\u2ae0":e.MO.REL,"\u2ae1":e.MO.REL,"\u2ae2":e.MO.REL,"\u2ae3":e.MO.REL,"\u2ae4":e.MO.REL,"\u2ae5":e.MO.REL,"\u2ae6":e.MO.REL,"\u2ae7":e.MO.REL,"\u2ae8":e.MO.REL,"\u2ae9":e.MO.REL,"\u2aea":e.MO.REL,"\u2aeb":e.MO.REL,"\u2aec":e.MO.REL,"\u2aed":e.MO.REL,"\u2aee":e.MO.REL,"\u2aef":e.MO.REL,"\u2af0":e.MO.REL,"\u2af1":e.MO.REL,"\u2af2":e.MO.REL,"\u2af3":e.MO.REL,"\u2af4":e.MO.BIN4,"\u2af5":e.MO.BIN4,"\u2af6":e.MO.BIN4,"\u2af7":e.MO.REL,"\u2af8":e.MO.REL,"\u2af9":e.MO.REL,"\u2afa":e.MO.REL,"\u2afb":e.MO.BIN4,"\u2afd":e.MO.BIN4,"\u2afe":e.MO.BIN3,"\u2b45":e.MO.RELSTRETCH,"\u2b46":e.MO.RELSTRETCH,"\u3008":e.MO.OPEN,"\u3009":e.MO.CLOSE,"\ufe37":e.MO.WIDEACCENT,"\ufe38":e.MO.WIDEACCENT}},e.OPTABLE.infix["^"]=e.MO.WIDEREL,e.OPTABLE.infix._=e.MO.WIDEREL,e.OPTABLE.prefix["\u2223"]=e.MO.OPEN,e.OPTABLE.prefix["\u2225"]=e.MO.OPEN,e.OPTABLE.postfix["\u2223"]=e.MO.CLOSE,e.OPTABLE.postfix["\u2225"]=e.MO.CLOSE},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),o=this&&this.__assign||function(){return(o=Object.assign||function(t){for(var e,r=1,n=arguments.length;r=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")},i=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var o,s=r(24),c=(o=s.PrioritizedList,i(l,o),l.prototype.register=function(t){return this.add(t,t.priority)},l.prototype.unregister=function(t){this.remove(t)},l.prototype.handlesDocument=function(t){var e,r;try{for(var n=a(this),i=n.next();!i.done;i=n.next()){var o=i.value.item;if(o.handlesDocument(t))return o}}catch(t){e={error:t}}finally{try{i&&!i.done&&(r=n.return)&&r.call(n)}finally{if(e)throw e.error}}throw new Error("Can't find handler for 
document")},l.prototype.document=function(t,e){return void 0===e&&(e=null),this.handlesDocument(t).create(t,e)},l);function l(){return null!==o&&o.apply(this,arguments)||this}e.HandlerList=c},function(t,e,r){"use strict";var c=this&&this.__values||function(t){var e="function"==typeof Symbol&&Symbol.iterator,r=e&&t[e],n=0;if(r)return r.call(t);if(t&&"number"==typeof t.length)return{next:function(){return t&&n>=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")},n=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")},s=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0\n",o=e;e+=" ";try{for(var a=l(t.childNodes),s=a.next();!s.done;s=a.next()){var c=s.value;i+=this.visitNode(c,e)}}catch(t){r={error:t}}finally{try{s&&!s.done&&(n=a.return)&&n.call(a)}finally{if(r)throw r.error}}return i+="\n"+o+""},h.prototype.visitAnnotationNode=function(t,e){return e+""+this.childNodeMml(t,"","")+""},h.prototype.visitDefault=function(t,e){var r=t.kind,n=s(t.isToken||0===t.childNodes.length?["",""]:["\n",e],2),i=n[0],o=n[1],a=this.childNodeMml(t,e+" ",i);return e+"<"+r+this.getAttributes(t)+">"+(a.match(/\S/)?i+a+o:"")+""},h.prototype.childNodeMml=function(t,e,r){var n,i,o="";try{for(var a=l(t.childNodes),s=a.next();!s.done;s=a.next()){var c=s.value;o+=this.visitNode(c,e)+r}}catch(t){n={error:t}}finally{try{s&&!s.done&&(i=a.return)&&i.call(a)}finally{if(n)throw n.error}}return o},h.prototype.getAttributes=function(t){var e,r,n="",i=t.attributes.getAllAttributes();try{for(var o=l(Object.keys(i)),a=o.next();!a.done;a=o.next()){var s=a.value;void 0!==i[s]&&(n+=" "+s+'="'+this.quoteHTML(i[s].toString())+'"')}}catch(t){e={error:t}}finally{try{a&&!a.done&&(r=o.return)&&r.call(o)}finally{if(e)throw e.error}}return n},h.prototype.quoteHTML=function(t){return t.replace(/&/g,"&").replace(//g,">").replace(/\"/g,""").replace(/([\uD800-\uDBFF].)/g,function(t,e){return"&#x"+(1024*(e.charCodeAt(0)-55296)+(e.charCodeAt(1)-56320)+65536).toString(16).toUpperCase()+";"}).replace(/([\u0080-\uD7FF\uE000-\uFFFF])/g,function(t,e){return"&#x"+e.charCodeAt(0).toString(16).toUpperCase()+";"})},h);function h(){return null!==o&&o.apply(this,arguments)||this}e.SerializedMmlVisitor=c},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=(Object.defineProperty(i.prototype,"kind",{get:function(){return this.node.kind},enumerable:!0,configurable:!0}),i.prototype.wrap=function(t){return this.factory.wrap(t)},i);function i(t,e){this.factory=t,this.node=e}e.AbstractWrapper=n},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),o=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not 
defined.")};Object.defineProperty(e,"__esModule",{value:!0});var s,a=r(41),w=r(3),c=r(100),l=r(101),u=r(102),h=r(13),f=(s=a.AbstractMathDocument,i(p,s),p.prototype.findPosition=function(t,e,r,n){var i,o;try{for(var a=T(n[t]),s=a.next();!s.done;s=a.next()){var c=s.value,l=_(c,2),u=l[0],h=l[1];if(e<=h)return{node:u,n:e,delim:r};e-=h}}catch(t){i={error:t}}finally{try{s&&!s.done&&(o=a.return)&&o.call(a)}finally{if(i)throw i.error}}return{node:null,n:0,delim:r}},p.prototype.mathItem=function(t,e,r){var n=t.math,i=this.findPosition(t.n,t.start.n,t.open,r),o=this.findPosition(t.n,t.end.n,t.close,r);return new this.options.MathItem(n,e,t.display,i,o)},p.prototype.findMath=function(t){var e,r,n,i,o,a,s,c,l;if(!this.processed.isSet("findMath")){this.adaptor.document=this.document,t=w.userOptions({elements:[this.adaptor.body(this.document)]},t);try{for(var u=T(this.adaptor.getElements(t.elements,this.document)),h=u.next();!h.done;h=u.next()){var f=h.value,p=_([null,null],2),d=p[0],m=p[1];try{for(var y=(n=void 0,T(this.inputJax)),v=y.next();!v.done;v=y.next()){var b=v.value,g=new this.options.MathList;if(b.processStrings){null===d&&(d=(o=_(this.domStrings.find(f),2))[0],m=o[1]);try{for(var M=(a=void 0,T(b.findMath(d))),O=M.next();!O.done;O=M.next()){var x=O.value;g.push(this.mathItem(x,b,m))}}catch(t){a={error:t}}finally{try{O&&!O.done&&(s=M.return)&&s.call(M)}finally{if(a)throw a.error}}}else try{for(var S=(c=void 0,T(b.findMath(f))),E=S.next();!E.done;E=S.next()){x=E.value;var C=new this.options.MathItem(x.math,b,x.display,x.start,x.end);g.push(C)}}catch(t){c={error:t}}finally{try{E&&!E.done&&(l=S.return)&&l.call(S)}finally{if(c)throw c.error}}this.math.merge(g)}}catch(t){n={error:t}}finally{try{v&&!v.done&&(i=y.return)&&i.call(y)}finally{if(n)throw n.error}}}}catch(t){e={error:t}}finally{try{h&&!h.done&&(r=u.return)&&r.call(u)}finally{if(e)throw e.error}}this.processed.set("findMath")}return this},p.prototype.updateDocument=function(){return this.processed.isSet("updateDocument")||(this.addPageElements(),this.addStyleSheet(),s.prototype.updateDocument.call(this),this.processed.set("updateDocument")),this},p.prototype.addPageElements=function(){var t=this.adaptor.body(this.document),e=this.documentPageElements();e&&this.adaptor.append(t,e)},p.prototype.addStyleSheet=function(){var t=this.documentStyleSheet();if(t){var e=this.adaptor.head(this.document),r=this.findSheet(e,this.adaptor.getAttribute(t,"id"));r?this.adaptor.replace(t,r):this.adaptor.append(e,t)}},p.prototype.findSheet=function(t,e){var r,n;if(e)try{for(var i=T(this.adaptor.tags(t,"style")),o=i.next();!o.done;o=i.next()){var a=o.value;if(this.adaptor.getAttribute(a,"id")===e)return a}}catch(t){r={error:t}}finally{try{o&&!o.done&&(n=i.return)&&n.call(i)}finally{if(r)throw r.error}}return null},p.prototype.removeFromDocument=function(t){var e,r;if(void 0===t&&(t=!1),this.processed.isSet("updateDocument"))try{for(var n=T(this.math),i=n.next();!i.done;i=n.next()){var o=i.value;o.state()>=h.STATE.INSERTED&&o.state(h.STATE.TYPESET,t)}}catch(t){e={error:t}}finally{try{i&&!i.done&&(r=n.return)&&r.call(n)}finally{if(e)throw e.error}}return this.processed.clear("updateDocument"),this},p.prototype.documentStyleSheet=function(){return this.outputJax.styleSheet(this)},p.prototype.documentPageElements=function(){return 
this.outputJax.pageElements(this)},p.KIND="HTML",p.OPTIONS=o(o({},a.AbstractMathDocument.OPTIONS),{renderActions:w.expandable(o(o({},a.AbstractMathDocument.OPTIONS.renderActions),{styles:[h.STATE.INSERTED+1,"","updateStyleSheet",!1]})),MathList:l.HTMLMathList,MathItem:c.HTMLMathItem,DomStrings:null}),p);function p(t,e,r){var n=this,i=_(w.separateOptions(r,u.HTMLDomStrings.OPTIONS),2),o=i[0],a=i[1];return(n=s.call(this,t,e,o)||this).domStrings=n.options.DomStrings||new u.HTMLDomStrings(a),n.domStrings.adaptor=e,n}e.HTMLDocument=f},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)});Object.defineProperty(e,"__esModule",{value:!0});var o,a=r(13),s=(o=a.AbstractMathItem,i(c,o),Object.defineProperty(c.prototype,"adaptor",{get:function(){return this.inputJax.adaptor},enumerable:!0,configurable:!0}),c.prototype.updateDocument=function(t){if(this.state()=a.STATE.TYPESET){var e=this.start.node,r=this.adaptor.text("");if(t){var n=this.start.delim+this.math+this.end.delim;if(this.inputJax.processStrings)r=this.adaptor.text(n);else{var i=this.adaptor.parse(n,"text/html");r=this.adaptor.firstChild(this.adaptor.body(i))}}this.adaptor.replace(r,e),this.start.node=this.end.node=r,this.start.n=this.end.n=0}},c);function c(t,e,r,n,i){return void 0===r&&(r=!0),void 0===n&&(n={node:null,n:0,delim:""}),void 0===i&&(i={node:null,n:0,delim:""}),o.call(this,t,e,r,n,i)||this}e.HTMLMathItem=s},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)});Object.defineProperty(e,"__esModule",{value:!0});var o,a=r(43),s=(o=a.AbstractMathList,i(c,o),c);function c(){return null!==o&&o.apply(this,arguments)||this}e.HTMLMathList=s},function(t,e,r){"use strict";var s=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var a=r(29),u=r(3),h=r(106),f=r(107),s=r(6),p=r(21),d=r(4),m=r(110),y=r(27),v=r(11);r(113);var b,g=(b=a.AbstractInputJax,i(M,b),M.configure=function(t){var e,r,n=v.Configuration.empty();try{for(var i=c(t),o=i.next();!o.done;o=i.next()){var a=o.value,s=v.ConfigurationHandler.get(a);s&&n.append(s)}}catch(t){e={error:t}}finally{try{o&&!o.done&&(r=i.return)&&r.call(i)}finally{if(e)throw e.error}}return n.init(n),n},M.tags=function(t,e){y.TagsFactory.addTags(e.tags),y.TagsFactory.setDefault(t.options.tags),t.tags=y.TagsFactory.getDefault(),t.tags.configuration=t},M.prototype.setMmlFactory=function(t){b.prototype.setMmlFactory.call(this,t),this._parseOptions.nodeFactory.setMmlFactory(t)},Object.defineProperty(M.prototype,"parseOptions",{get:function(){return this._parseOptions},enumerable:!0,configurable:!0}),M.prototype.compile=function(t,e){this.parseOptions.clear(),this.executeFilters(this.preFilters,t,e,this.parseOptions);var 
r,n=t.display;this.latex=t.math,this.parseOptions.tags.startEquation(t);try{r=new p.default(this.latex,{display:n,isInner:!1},this.parseOptions).mml()}catch(t){if(!(t instanceof d.default))throw t;this.parseOptions.error=!0,r=this.formatError(t)}return r=this.parseOptions.nodeFactory.create("node","math",[r]),n&&s.default.setAttribute(r,"display","block"),this.parseOptions.tags.finishEquation(t),this.parseOptions.root=r,this.executeFilters(this.postFilters,t,e,this.parseOptions),this.mathNode=this.parseOptions.root,this.mathNode},M.prototype.findMath=function(t){return this.findTeX.findMath(t)},M.prototype.formatError=function(t){var e=t.message.replace(/\n.*/,"");return this.parseOptions.nodeFactory.create("error",e,t.id,this.latex)},M.NAME="TeX",M.OPTIONS=o(o({},a.AbstractInputJax.OPTIONS),{FindTeX:null,packages:["base"],digits:/^(?:[0-9]+(?:\{,\}[0-9]{3})*(?:\.[0-9]*)?|\.[0-9]+)/,maxBuffer:5120}),M);function M(t){void 0===t&&(t={});var e=this,r=l(u.separateOptions(t,M.OPTIONS,h.FindTeX.OPTIONS),3),n=r[0],i=r[1],o=r[2];(e=b.call(this,i)||this).findTeX=e.options.FindTeX||new h.FindTeX(o);var a=e.options.packages,s=e.configuration=M.configure(a),c=e._parseOptions=new m.default(s,[e.options,y.TagsFactory.OPTIONS]);return u.userOptions(c.options,n),s.config(s,e),M.tags(c,s),e.postFilters.add(f.default.cleanSubSup,-5),e.postFilters.add(f.default.setInherited,-4),e.postFilters.add(f.default.cleanStretchy,-3),e.postFilters.add(f.default.cleanAttributes,-2),e.postFilters.add(f.default.combineRelations,-1),e}e.TeX=g},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),h=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var n,d=r(0),m=r(6);!function(t){t.cleanStretchy=function(t){var e,r,n=t.data;try{for(var i=p(n.getList("fixStretchy")),o=i.next();!o.done;o=i.next()){var a=o.value;if(m.default.getProperty(a,"fixStretchy")){var s=m.default.getForm(a);s&&s[3]&&s[3].stretchy&&m.default.setAttribute(a,"stretchy",!1);var c=a.parent;if(!(m.default.getTexClass(a)||s&&s[2])){var l=n.nodeFactory.create("node","TeXAtom",[a]);c.replaceChild(l,a),l.inheritAttributesFrom(a)}m.default.removeProperties(a,"fixStretchy")}}}catch(t){e={error:t}}finally{try{o&&!o.done&&(r=i.return)&&r.call(i)}finally{if(e)throw e.error}}},t.cleanAttributes=function(t){t.data.root.walkTree(function(t,e){var r,n,i=t.attributes;try{for(var o=p(i.getExplicitNames()),a=o.next();!a.done;a=o.next()){var s=a.value;i.attributes[s]===t.attributes.getInherited(s)&&delete i.attributes[s]}}catch(t){r={error:t}}finally{try{a&&!a.done&&(n=o.return)&&n.call(o)}finally{if(r)throw r.error}}},{})},t.combineRelations=function(t){var e,r;try{for(var n=p(t.data.getList("mo")),i=n.next();!i.done;i=n.next()){var o=i.value;if(!o.getProperty("relationsCombined")&&o.parent&&(!o.parent||m.default.isType(o.parent,"mrow"))&&m.default.getTexClass(o)===d.TEXCLASS.REL){for(var a=o.parent,s=void 
0,c=a.childNodes,l=c.indexOf(o)+1,u=m.default.getProperty(o,"variantForm");l\u20d2",nvinfin:"\u29de",nvlArr:"\u2902",nvle:"\u2264\u20d2",nvlt:"<\u20d2",nvltrie:"\u22b4\u20d2",nvrArr:"\u2903",nvrtrie:"\u22b5\u20d2",nvsim:"\u223c\u20d2",nwArr:"\u21d6",nwarhk:"\u2923",nwarrow:"\u2196",nwnear:"\u2927"},"n")},function(t,e,r){"use strict";var u=this&&this.__values||function(t){var e="function"==typeof Symbol&&Symbol.iterator,r=e&&t[e],n=0;if(r)return r.call(t);if(t&&"number"==typeof t.length)return{next:function(){return t&&n>=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")},h=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var o=r(111),a=r(8),s=r(112),l=r(3),u=(h.prototype.pushParser=function(t){this.parsers.unshift(t)},h.prototype.popParser=function(){this.parsers.shift()},Object.defineProperty(h.prototype,"parser",{get:function(){return this.parsers[0]},enumerable:!0,configurable:!0}),h.prototype.clear=function(){this.parsers=[],this.root=null,this.nodeLists={},this.error=!1,this.tags.resetTag()},h.prototype.addNode=function(t,e){var r=this.nodeLists[t];(r=r||(this.nodeLists[t]=[])).push(e)},h.prototype.getList=function(t){var e,r,n=this.nodeLists[t]||[],i=[];try{for(var o=c(n),a=o.next();!a.done;a=o.next()){var s=a.value;this.inTree(s)&&i.push(s)}}catch(t){e={error:t}}finally{try{a&&!a.done&&(r=o.return)&&r.call(o)}finally{if(e)throw e.error}}return this.nodeLists[t]=i},h.prototype.inTree=function(t){for(;t&&t!==this.root;)t=t.parent;return!!t},h);function h(t,e){void 0===e&&(e=[]),this.options={},this.parsers=[],this.root=null,this.nodeLists={},this.error=!1,this.handlers=new a.SubHandlers(t),this.nodeFactory=new s.NodeFactory,(this.nodeFactory.configuration=this).nodeFactory.setCreators(t.nodes),this.itemFactory=new o.default(t.items),this.itemFactory.configuration=this,l.defaultOptions.apply(void 0,i([this.options],e)),l.defaultOptions(this.options,t.options)}e.default=u},function(t,e,r){"use strict";var n,i,o=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)});Object.defineProperty(e,"__esModule",{value:!0});var a,s=r(32),c=r(30),l=(a=s.BaseItem,o(u,a),u);function u(){return null!==a&&a.apply(this,arguments)||this}var h,f=(h=c.AbstractFactory,o(p,h),p.DefaultStackItems=((i={})[l.prototype.kind]=l,i),p);function p(){var t=null!==h&&h.apply(this,arguments)||this;return t.defaultKind="dummy",t.configuration=null,t}e.default=f},function(t,e,r){"use strict";var n=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")},r=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 
0===e||0"),i=this.checkForErrors(this.adaptor.parse(n,"text/"+this.options.parseAs)),o=this.adaptor.body(i);1!==this.adaptor.childNodes(o).length&&this.error("MathML must consist of a single element"),r=this.adaptor.remove(this.adaptor.firstChild(o)),"math"!==this.adaptor.kind(r).replace(/^[a-z]+:/,"")&&this.error("MathML must be formed by a element, not <"+this.adaptor.kind(r)+">")}return r=this.executeFilters(this.mmlFilters,t,e,r),this.executeFilters(this.postFilters,t,e,this.mathml.compile(r))},p.prototype.checkForErrors=function(t){var e=this.adaptor.tags(this.adaptor.body(t),"parsererror")[0];return e&&(""===this.adaptor.textContent(e)&&this.error("Error processing MathML"),this.options.parseError.call(this,e)),t},p.prototype.error=function(t){throw new Error(t)},p.prototype.findMath=function(t){return this.findMathML.findMath(t)},p.NAME="MathML",p.OPTIONS=c.defaultOptions({parseAs:"html",forceReparse:!1,FindMathML:null,MathMLCompile:null,parseError:function(t){this.error(this.adaptor.textContent(t).replace(/\n.*/g,""))}},o.AbstractInputJax.OPTIONS),p);function p(t){void 0===t&&(t={});var e=this,r=a(c.separateOptions(t,u.FindMathML.OPTIONS,h.MathMLCompile.OPTIONS),3),n=r[0],i=r[1],o=r[2];return(e=s.call(this,n)||this).findMathML=e.options.FindMathML||new u.FindMathML(i),e.mathml=e.options.MathMLCompile||new h.MathMLCompile(o),e.mmlFilters=new l.FunctionList,e}e.MathML=f},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),d=this&&this.__values||function(t){var e="function"==typeof Symbol&&Symbol.iterator,r=e&&t[e],n=0;if(r)return r.call(t);if(t&&"number"==typeof t.length)return{next:function(){return t&&n>=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var o,a=r(40),m="http://www.w3.org/1998/Math/MathML",s=(o=a.AbstractFindMath,i(c,o),c.prototype.findMath=function(t){var e=new Set;this.findMathNodes(t,e),this.findMathPrefixed(t,e);var r=this.adaptor.root(this.adaptor.document);return"html"===this.adaptor.kind(r)&&0===e.size&&this.findMathNS(t,e),this.processMath(e)},c.prototype.findMathNodes=function(t,e){var r,n;try{for(var i=d(this.adaptor.tags(t,"math")),o=i.next();!o.done;o=i.next()){var a=o.value;e.add(a)}}catch(t){r={error:t}}finally{try{o&&!o.done&&(n=i.return)&&n.call(i)}finally{if(r)throw r.error}}},c.prototype.findMathPrefixed=function(t,e){var r,n,i,o,a=this.adaptor.root(this.adaptor.document);try{for(var s=d(this.adaptor.allAttributes(a)),c=s.next();!c.done;c=s.next()){var l=c.value;if("xmlns:"===l.name.substr(0,6)&&l.value===m){var u=l.name.substr(6);try{for(var h=(i=void 0,d(this.adaptor.tags(t,u+":math"))),f=h.next();!f.done;f=h.next()){var p=f.value;e.add(p)}}catch(t){i={error:t}}finally{try{f&&!f.done&&(o=h.return)&&o.call(h)}finally{if(i)throw i.error}}}}}catch(t){r={error:t}}finally{try{c&&!c.done&&(n=s.return)&&n.call(s)}finally{if(r)throw r.error}}},c.prototype.findMathNS=function(t,e){var r,n;try{for(var i=d(this.adaptor.tags(t,"math",m)),o=i.next();!o.done;o=i.next()){var a=o.value;e.add(a)}}catch(t){r={error:t}}finally{try{o&&!o.done&&(n=i.return)&&n.call(i)}finally{if(r)throw 
r.error}}},c.prototype.processMath=function(t){var e,r,n=[];try{for(var i=d(Array.from(t)),o=i.next();!o.done;o=i.next()){var a=o.value,s="block"===this.adaptor.getAttribute(a,"display")||"display"===this.adaptor.getAttribute(a,"mode"),c={node:a,n:0,delim:""},l={node:a,n:0,delim:""};n.push({math:this.adaptor.outerHTML(a),start:c,end:l,display:s})}}catch(t){e={error:t}}finally{try{o&&!o.done&&(r=i.return)&&r.call(i)}finally{if(e)throw e.error}}return n},c.OPTIONS={},c);function c(){return null!==o&&o.apply(this,arguments)||this}e.FindMathML=s},function(t,e,r){"use strict";var n=this&&this.__assign||function(){return(n=Object.assign||function(t){for(var e,r=1,n=arguments.length;r=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var i=r(0),o=r(3),a=r(12),s=(c.prototype.setMmlFactory=function(t){this.factory=t},c.prototype.compile=function(t){var e=this.makeNode(t);return e.verifyTree(this.options.verify),e.setInheritedAttributes({},!1,0,!1),e.walkTree(this.markMrows),e},c.prototype.makeNode=function(t){var e,r,n=!1,i="",o=this.adaptor.kind(t).replace(/^.*:/,"");try{for(var a=u(this.adaptor.allClasses(t)),s=a.next();!s.done;s=a.next()){var c=s.value;c.match(/^MJX-TeXAtom-/)?(i=c.substr(12),o="TeXAtom"):"MJX-fixedlimits"===c&&(n=!0)}}catch(t){e={error:t}}finally{try{s&&!s.done&&(r=a.return)&&r.call(a)}finally{if(e)throw e.error}}this.factory.getNodeClass(o)||this.error('Unknown node type "'+o+'"');var l=this.factory.create(o);return i&&this.texAtom(l,i,n),this.addAttributes(l,t),this.checkClass(l,t),this.addChildren(l,t),l},c.prototype.addAttributes=function(t,e){var r,n;try{for(var i=u(this.adaptor.allAttributes(e)),o=i.next();!o.done;o=i.next()){var a=o.value,s=a.name;if("class"!==s){var c=this.filterAttribute(s,a.value);if(null!==c){var l=c.toLowerCase();"true"===l||"false"===l?t.attributes.set(s,"true"===l):t.attributes.set(s,c)}}}}catch(t){r={error:t}}finally{try{o&&!o.done&&(n=i.return)&&n.call(i)}finally{if(r)throw r.error}}},c.prototype.filterAttribute=function(t,e){return e},c.prototype.addChildren=function(t,e){var r,n;if(0!==t.arity)try{for(var i=u(this.adaptor.childNodes(e)),o=i.next();!o.done;o=i.next()){var a=o.value,s=this.adaptor.kind(a);if("#comment"!==s)if("#text"===s)this.addText(t,a);else if(t.isKind("annotation-xml"))t.appendChild(this.factory.create("XML").setXML(a));else{var c=t.appendChild(this.makeNode(a));0===c.arity&&this.adaptor.childNodes(a).length&&(this.options.fixMisplacedChildren?this.addChildren(t,a):c.mError("There should not be children for "+c.kind+" nodes",this.options.verify,!0))}}}catch(t){r={error:t}}finally{try{o&&!o.done&&(n=i.return)&&n.call(i)}finally{if(r)throw r.error}}},c.prototype.addText=function(t,e){var r=this.adaptor.value(e);(t.isToken||t.getProperty("isChars"))&&t.arity?(t.isToken&&(r=a.translate(r),r=this.trimSpace(r)),t.appendChild(this.factory.create("text").setText(r))):r.match(/\S/)&&this.error('Unexpected text node "'+r+'"')},c.prototype.checkClass=function(t,e){var r,n,i=[];try{for(var o=u(this.adaptor.allClasses(e)),a=o.next();!a.done;a=o.next()){var s=a.value;"MJX-"===s.substr(0,4)?"MJX-variant"===s?t.setProperty("variantForm",!0):"MJX-TeXAtom"!==s.substr(0,11)&&t.attributes.set("mathvariant",s.substr(3)):i.push(s)}}catch(t){r={error:t}}finally{try{a&&!a.done&&(n=o.return)&&n.call(o)}finally{if(r)throw r.error}}i.length&&t.attributes.set("class",i.join(" 
"))},c.prototype.texAtom=function(t,e,r){t.texClass=i.TEXCLASS[e],"OP"!==e||r||(t.setProperty("movesupsub",!0),t.attributes.setInherited("movablelimits",!0))},c.prototype.markMrows=function(t){if(t.isKind("mrow")&&!t.isInferred&&2<=t.childNodes.length){var e=t.childNodes[0],r=t.childNodes[t.childNodes.length-1];e.isKind("mo")&&e.attributes.get("fence")&&r.isKind("mo")&&r.attributes.get("fence")&&(e.childNodes.length&&t.setProperty("open",e.getText()),r.childNodes.length&&t.setProperty("close",r.getText()))}},c.prototype.trimSpace=function(t){return t.replace(/[\t\n\r]/g," ").trim().replace(/ +/g," ")},c.prototype.error=function(t){throw new Error(t)},c.OPTIONS={MmlFactory:null,fixMisplacedChildren:!0,verify:{},translateEntities:!0},c.VERIFY=n({},i.AbstractMmlNode.verifyDefaults),c);function c(t){void 0===t&&(t={});var e=this.constructor;this.options=o.userOptions(o.defaultOptions({},e.OPTIONS),t),this.options.verify&&(this.options.verify=o.userOptions(o.defaultOptions({},e.VERIFY),this.options.verify))}e.MathMLCompile=s},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),o=this&&this.__assign||function(){return(o=Object.assign||function(t){for(var e,r=1,n=arguments.length;r=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var s,c=r(123),l=r(125),u=r(77),h=(s=c.CommonOutputJax,i(f,s),f.prototype.escaped=function(t,e){return this.setDocument(e),this.html("span",{},[this.text(t.math)])},f.prototype.styleSheet=function(t){var e=s.prototype.styleSheet.call(this,t);return this.adaptor.setAttribute(e,"id",f.STYLESHEETID),e},f.prototype.addClassStyles=function(t){var e;this.options.adaptiveCSS&&!t.used||(t.autoStyle&&"unknown"!==t.kind&&this.cssStyles.addStyles(((e={})["mjx-"+t.kind]={display:"inline-block","text-align":"left"},e)),s.prototype.addClassStyles.call(this,t))},f.prototype.processMath=function(t,e){this.factory.wrap(t).toCHTML(e)},f.prototype.clearCache=function(){var e,t;this.cssStyles.clear(),this.font.clearCache();try{for(var r=a(this.factory.getKinds()),n=r.next();!n.done;n=r.next()){var i=n.value;this.factory.getNodeClass(i).used=!1}}catch(t){e={error:t}}finally{try{n&&!n.done&&(t=r.return)&&t.call(r)}finally{if(e)throw e.error}}},f.prototype.unknownText=function(t,e){var r={},n=100/this.math.metrics.scale;return 100!=n&&(r["font-size"]=this.fixed(n,1)+"%"),"-explicitFont"!==e&&this.cssFontStyles(this.font.getCssFont(e),r),this.html("mjx-utext",{variant:e,style:r},[this.text(t)])},f.prototype.measureTextNode=function(t){var e=this.adaptor;t=e.clone(t);var r=this.html("mjx-measure-text",{},[t]);e.append(e.parent(this.math.start.node),this.container),e.append(this.container,r);var n=e.nodeSize(t,this.math.metrics.em)[0]/this.math.metrics.scale;return e.remove(this.container),e.remove(r),{w:n,h:.75,d:.25}},f.prototype.getFontData=function(t){var e=s.prototype.getFontData.call(this,t);return e[0]="MJXZERO, "+e[0],e},f.NAME="CHTML",f.OPTIONS=o(o({},c.CommonOutputJax.OPTIONS),{adaptiveCSS:!0}),f.commonStyles={'mjx-container [space="1"]':{"margin-left":".111em"},'mjx-container [space="2"]':{"margin-left":".167em"},'mjx-container 
[space="3"]':{"margin-left":".222em"},'mjx-container [space="4"]':{"margin-left":".278em"},'mjx-container [space="5"]':{"margin-left":".333em"},'mjx-container [rspace="1"]':{"margin-right":".111em"},'mjx-container [rspace="2"]':{"margin-right":".167em"},'mjx-container [rspace="3"]':{"margin-right":".222em"},'mjx-container [rspace="4"]':{"margin-right":".278em"},'mjx-container [rspace="5"]':{"margin-right":".333em"},'mjx-container [size="s"]':{"font-size":"70.7%"},'mjx-container [size="ss"]':{"font-size":"50%"},'mjx-container [size="Tn"]':{"font-size":"60%"},'mjx-container [size="sm"]':{"font-size":"85%"},'mjx-container [size="lg"]':{"font-size":"120%"},'mjx-container [size="Lg"]':{"font-size":"144%"},'mjx-container [size="LG"]':{"font-size":"173%"},'mjx-container [size="hg"]':{"font-size":"207%"},'mjx-container [size="HG"]':{"font-size":"249%"},'mjx-container [width="full"]':{width:"100%"},"mjx-box":{display:"inline-block"},"mjx-block":{display:"block"},"mjx-itable":{display:"inline-table"},"mjx-row":{display:"table-row"},"mjx-row > *":{display:"table-cell"},"mjx-mtext":{display:"inline-block"},"mjx-mstyle":{display:"inline-block"},"mjx-merror":{display:"inline-block",color:"red","background-color":"yellow"},"mjx-mphantom":{visibility:"hidden"}},f.STYLESHEETID="MJX-CHTML-styles",f);function f(t){void 0===t&&(t=null);var e=s.call(this,t,l.CHTMLWrapperFactory,u.TeXFont)||this;return e.font.adaptiveCSS(e.options.adaptiveCSS),e}e.CHTML=h},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),o=this&&this.__assign||function(){return(o=Object.assign||function(t){for(var e,r=1,n=arguments.length;r=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var c,a=r(42),l=r(3),u=r(124),h=r(14),f=r(71),p=(c=a.AbstractOutputJax,i(d,c),d.prototype.typeset=function(t,e){this.setDocument(e);var r=this.createNode();return this.toDOM(t,r,e),r},d.prototype.createNode=function(){var t=this.constructor.NAME;return this.html("mjx-container",{class:"MathJax",jax:t})},d.prototype.setScale=function(t){var e=this.math.metrics.scale*this.options.scale;1!=e&&this.adaptor.setStyle(t,"fontSize",h.percent(e))},d.prototype.toDOM=function(t,e,r){void 0===r&&(r=null),this.setDocument(r),this.math=t,this.pxPerEm=t.metrics.ex/this.font.params.x_height,t.root.setTeXclass(null),this.setScale(e),this.nodeMap=new Map,this.container=e,this.processMath(t.root,e),this.nodeMap=null,this.executeFilters(this.postFilters,t,r,e)},d.prototype.getBBox=function(t,e){this.setDocument(e),(this.math=t).root.setTeXclass(null),this.nodeMap=new Map;var r=this.factory.wrap(t.root).getBBox();return this.nodeMap=null,r},d.prototype.getMetrics=function(t){var e,r;this.setDocument(t);var n=this.adaptor,i=this.getMetricMaps(t);try{for(var o=w(t.math),a=o.next();!a.done;a=o.next()){var s=a.value,c=i[s.display?1:0].get(n.parent(s.start.node)),l=c.em,u=c.ex,h=c.containerWidth,f=c.lineWidth,p=c.scale;s.setMetrics(l,u,h,f,p)}}catch(t){e={error:t}}finally{try{a&&!a.done&&(r=o.return)&&r.call(o)}finally{if(e)throw e.error}}},d.prototype.getMetricsFor=function(t,e){var r=this.getTestElement(t,e),n=this.measureMetrics(r);return 
this.adaptor.remove(r),n},d.prototype.getMetricMaps=function(t){var e,r,n,i,o,a,s,c,l,u,h=this.adaptor,f=[new Map,new Map];try{for(var p=w(t.math),d=p.next();!d.done;d=p.next()){var m=d.value,y=h.parent(m.start.node),v=f[m.display?1:0];v.has(y)||v.set(y,this.getTestElement(y,m.display))}}catch(t){e={error:t}}finally{try{d&&!d.done&&(r=p.return)&&r.call(p)}finally{if(e)throw e.error}}var b=[new Map,new Map];try{for(var g=w(b.keys()),M=g.next();!M.done;M=g.next()){var O=M.value;try{for(var x=(o=void 0,w(f[O].keys())),S=x.next();!S.done;S=x.next())y=S.value,b[O].set(y,this.measureMetrics(f[O].get(y)))}catch(t){o={error:t}}finally{try{S&&!S.done&&(a=x.return)&&a.call(x)}finally{if(o)throw o.error}}}}catch(t){n={error:t}}finally{try{M&&!M.done&&(i=g.return)&&i.call(g)}finally{if(n)throw n.error}}try{for(var E=w(b.keys()),C=E.next();!C.done;C=E.next()){O=C.value;try{for(var _=(l=void 0,w(f[O].values())),T=_.next();!T.done;T=_.next())y=T.value,h.remove(y)}catch(t){l={error:t}}finally{try{T&&!T.done&&(u=_.return)&&u.call(_)}finally{if(l)throw l.error}}}}catch(t){s={error:t}}finally{try{C&&!C.done&&(c=E.return)&&c.call(E)}finally{if(s)throw s.error}}return b},d.prototype.getTestElement=function(t,e){var r=this.adaptor;if(!this.testInline){this.testInline=this.html("mjx-test",{style:{display:"inline-block",width:"100%","font-style":"normal","font-weight":"normal","font-size":"100%","font-size-adjust":"none","text-indent":0,"text-transform":"none","letter-spacing":"normal","word-spacing":"normal",overflow:"hidden",height:"1px","margin-right":"-1px"}},[this.html("mjx-left-box",{style:{display:"inline-block",width:0,float:"left"}}),this.html("mjx-ex-box",{style:{position:"absolute",overflow:"hidden",width:"1px",height:"60ex"}}),this.html("mjx-right-box",{style:{display:"inline-block",width:0,float:"right"}})]),this.testDisplay=r.clone(this.testInline),r.setStyle(this.testDisplay,"display","table"),r.setStyle(this.testDisplay,"margin-right",""),r.setStyle(r.firstChild(this.testDisplay),"display","none");var n=r.lastChild(this.testDisplay);r.setStyle(n,"display","table-cell"),r.setStyle(n,"width","10000em"),r.setStyle(n,"float","")}return r.append(t,r.clone(e?this.testDisplay:this.testInline))},d.prototype.measureMetrics=function(t){var e=this.adaptor,r=e.fontSize(t),n=e.nodeSize(e.childNode(t,1))[1]/60||r*this.options.exFactor;return{em:r,ex:n,containerWidth:"table"===e.getStyle(t,"display")?e.nodeSize(e.lastChild(t))[0]-1:e.nodeBBox(e.lastChild(t)).left-e.nodeBBox(e.firstChild(t)).left-2,lineWidth:1e6,scale:Math.max(this.options.minScale,this.options.matchFontHeight?n/this.font.params.x_height/r:1)}},d.prototype.styleSheet=function(t){var e,r;this.setDocument(t),this.cssStyles.clear(),this.cssStyles.addStyles(this.constructor.commonStyles);try{for(var n=w(this.factory.getKinds()),i=n.next();!i.done;i=n.next()){var o=i.value;this.addClassStyles(this.factory.getNodeClass(o))}}catch(t){e={error:t}}finally{try{i&&!i.done&&(r=n.return)&&r.call(n)}finally{if(e)throw e.error}}return this.cssStyles.addStyles(this.font.styles),this.html("style",{id:"MJX-styles"},[this.text("\n"+this.cssStyles.cssText+"\n")])},d.prototype.addClassStyles=function(t){this.cssStyles.addStyles(t.styles)},d.prototype.setDocument=function(t){t&&(this.document=t,this.adaptor.document=t.document)},d.prototype.html=function(t,e,r,n){return void 0===e&&(e={}),void 0===r&&(r=[]),this.adaptor.node(t,e,r,n)},d.prototype.text=function(t){return this.adaptor.text(t)},d.prototype.fixed=function(t,e){return void 
0===e&&(e=3),Math.abs(t)<6e-4?"0":t.toFixed(e).replace(/\.?0+$/,"")},d.prototype.measureText=function(t,e,r){void 0===r&&(r=["",!1,!1]);var n=this.unknownText(t,e);if("-explicitFont"===e){var i=this.cssFontStyles(r);this.adaptor.setAttributes(n,{style:i})}return this.measureTextNodeWithCache(n,t,e,r)},d.prototype.measureTextNodeWithCache=function(t,e,r,n){void 0===n&&(n=["",!1,!1]),"-explicitFont"===r&&(r=[n[0],n[1]?"T":"F",n[2]?"T":"F",""].join("-")),this.unknownCache.has(r)||this.unknownCache.set(r,new Map);var i=this.unknownCache.get(r),o=i.get(e);if(o)return o;var a=this.measureTextNode(t);return i.set(e,a),a},d.prototype.cssFontStyles=function(t,e){void 0===e&&(e={});var r=s(t,3),n=r[0],i=r[1],o=r[2];return e["font-family"]=n,i&&(e["font-style"]="italic"),o&&(e["font-weight"]="bold"),e},d.prototype.getFontData=function(t){return[(t=t||new f.Styles).get("font-family"),"italic"===t.get("font-style"),"bold"===t.get("font-weight")]},d.NAME="Common",d.OPTIONS=o(o({},a.AbstractOutputJax.OPTIONS),{scale:1,minScale:.5,matchFontHeight:!0,mtextInheritFont:!1,merrorInheritFont:!0,mathmlSpacing:!1,skipAttributes:{},exFactor:.5,displayAlign:"center",displayIndent:"0",wrapperFactory:null,font:null,cssStyles:null}),d.commonStyles={},d);function d(t,e,r){void 0===t&&(t=null),void 0===e&&(e=null),void 0===r&&(r=null);var n=this,i=s(l.separateOptions(t,r.OPTIONS),2),o=i[0],a=i[1];return(n=c.call(this,o)||this).factory=n.options.wrapperFactory||new e,(n.factory.jax=n).cssStyles=n.options.cssStyles||new u.CssStyles,n.font=n.options.font||new r(a),n.unknownCache=new Map,n}e.CommonOutputJax=p},function(t,e,r){"use strict";var l=this&&this.__values||function(t){var e="function"==typeof Symbol&&Symbol.iterator,r=e&&t[e],n=0;if(r)return r.call(t);if(t&&"number"==typeof t.length)return{next:function(){return t&&n>=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var n=(Object.defineProperty(i.prototype,"cssText",{get:function(){return this.getStyleString()},enumerable:!0,configurable:!0}),i.prototype.addStyles=function(t){var e,r;if(t)try{for(var n=l(Object.keys(t)),i=n.next();!i.done;i=n.next()){var o=i.value;this.styles[o]||(this.styles[o]={}),Object.assign(this.styles[o],t[o])}}catch(t){e={error:t}}finally{try{i&&!i.done&&(r=n.return)&&r.call(n)}finally{if(e)throw e.error}}},i.prototype.removeStyles=function(){for(var e,t,r=[],n=0;n=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")},o=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var f,o=r(2),d=r(135),a=r(26),p=r(129),s=(f=d.CommonMoMixin(o.CHTMLWrapper),i(c,f),c.prototype.toCHTML=function(t){var e,r,n=this.node.attributes,i=n.get("symmetric")&&2!==this.stretch.dir,o=0!==this.stretch.dir;o&&null===this.size&&this.getStretchedVariant([]);var a=this.standardCHTMLnode(t);if(this.noIC&&this.adaptor.setAttribute(a,"noIC","true"),o&&this.size<0)this.stretchHTML(a,i);else{if(i||n.get("largeop")){var s=p.BBox.empty();f.prototype.computeBBox.call(this,s);var c=this.em((s.d-s.h)/2+this.font.params.axis_height);"0"!==c&&this.adaptor.setStyle(a,"verticalAlign",c)}try{for(var 
l=h(this.childNodes),u=l.next();!u.done;u=l.next())u.value.toCHTML(a)}catch(t){e={error:t}}finally{try{u&&!u.done&&(r=l.return)&&r.call(l)}finally{if(e)throw e.error}}}},c.prototype.stretchHTML=function(t,e){var r=this.getText().charCodeAt(0),n=this.stretch;n.used=!0;var i=n.stretch,o=[];i[0]&&o.push(this.html("mjx-beg",{},[this.html("mjx-c")])),o.push(this.html("mjx-ext",{},[this.html("mjx-c")])),4===i.length&&o.push(this.html("mjx-mid",{},[this.html("mjx-c")]),this.html("mjx-ext",{},[this.html("mjx-c")])),i[2]&&o.push(this.html("mjx-end",{},[this.html("mjx-c")]));var a={},s=this.bbox,c=s.h,l=s.d,u=s.w;1===n.dir?(o.push(this.html("mjx-mark")),a.height=this.em(c+l),a.verticalAlign=this.em(-l)):a.width=this.em(u);var h=d.DirectionVH[n.dir],f={class:this.char(n.c||r),style:a},p=this.html("mjx-stretchy-"+h,f,o);this.adaptor.append(t,p)},c.kind=a.MmlMo.prototype.kind,c.styles={"mjx-stretchy-h":{display:"inline-table",width:"100%"},"mjx-stretchy-h > *":{display:"table-cell",width:0},"mjx-stretchy-h > * > mjx-c":{display:"inline-block"},"mjx-stretchy-h > * > mjx-c::before":{padding:".001em 0",width:"initial"},"mjx-stretchy-h > mjx-ext":{overflow:"hidden",width:"100%"},"mjx-stretchy-h > mjx-ext > mjx-c::before":{transform:"scalex(500)"},"mjx-stretchy-h > mjx-ext > mjx-c":{width:0},"mjx-stretchy-h > mjx-beg > mjx-c":{"margin-right":"-.1em"},"mjx-stretchy-h > mjx-end > mjx-c":{"margin-left":"-.1em"},"mjx-stretchy-v":{display:"inline-block"},"mjx-stretchy-v > *":{display:"block"},"mjx-stretchy-v > mjx-beg":{height:0},"mjx-stretchy-v > mjx-end > mjx-c":{display:"block"},"mjx-stretchy-v > * > mjx-c":{transform:"scale(1)","transform-origin":"left center",overflow:"hidden"},"mjx-stretchy-v > mjx-ext":{display:"block",height:"100%","box-sizing":"border-box",border:"0px solid transparent",overflow:"hidden"},"mjx-stretchy-v > mjx-ext > mjx-c::before":{width:"initial"},"mjx-stretchy-v > mjx-ext > mjx-c":{transform:"scaleY(500) translateY(.1em)",overflow:"visible"},"mjx-mark":{display:"inline-block",height:"0px"}},c);function c(){return null!==f&&f.apply(this,arguments)||this}e.CHTMLmo=s},function(t,e,r){"use strict";var n,i,o=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),m=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var s=r(23);e.DirectionVH=((i={})[1]="v",i[2]="h",i),e.CommonMoMixin=function(t){return o(e,i=t),e.prototype.computeBBox=function(t,e){void 0===e&&(e=!1);var r=0!==this.stretch.dir;if(r&&null===this.size&&this.getStretchedVariant([0]),!(r&&this.size<0)&&(i.prototype.computeBBox.call(this,t),this.copySkewIC(t),this.noIC&&(t.w-=t.ic),this.node.attributes.get("symmetric")&&2!==this.stretch.dir)){var n=(t.h+t.d)/2+this.font.params.axis_height-t.h;t.h+=n,t.d-=n}},e.prototype.getVariant=function(){this.node.attributes.get("largeop")?this.variant=this.node.attributes.get("displaystyle")?"-largeop":"-smallop":i.prototype.getVariant.call(this)},e.prototype.canStretch=function(t){if(0!==this.stretch.dir)return 
this.stretch.dir===t;if(!this.node.attributes.get("stretchy"))return!1;var e=this.getText();if(1!==e.length)return!1;var r=this.font.getDelimiter(e.charCodeAt(0));return this.stretch=r&&r.dir===t?r:s.NOSTRETCH,0!==this.stretch.dir},e.prototype.getStretchedVariant=function(t,e){var r,n;if(void 0===e&&(e=!1),0!==this.stretch.dir){var i=this.getWH(t),o=this.getSize("minsize",0),a=this.getSize("maxsize",1/0);i=Math.max(o,Math.min(a,i));var s=o||e?i:Math.max(i*this.font.params.delimiterfactor/1e3,i-this.font.params.delimitershortfall),c=this.stretch,l=c.c||this.getText().charCodeAt(0),u=0;if(c.sizes)try{for(var h=p(c.sizes),f=h.next();!f.done;f=h.next()){if(s<=f.value)return this.variant=this.font.getSizeVariant(l,u),void(this.size=u);u++}}catch(t){r={error:t}}finally{try{f&&!f.done&&(n=h.return)&&n.call(h)}finally{if(r)throw r.error}}c.stretch?(this.size=-1,this.invalidateBBox(),this.getStretchBBox(t,i,c)):(this.variant=this.font.getSizeVariant(l,u-1),this.size=u-1)}},e.prototype.getSize=function(t,e){var r=this.node.attributes;return r.isSet(t)&&(e=this.length2em(r.get(t),1,1)),e},e.prototype.getWH=function(t){if(0===t.length)return 0;if(1===t.length)return t[0];var e=m(t,2),r=e[0],n=e[1],i=this.font.params.axis_height;return this.node.attributes.get("symmetric")?2*Math.max(r-i,n+i):r+n},e.prototype.getStretchBBox=function(t,e,r){var n;r.hasOwnProperty("min")&&r.min>e&&(e=r.min);var i=m(r.HDW,3),o=i[0],a=i[1],s=i[2];1===this.stretch.dir?(o=(n=m(this.getBaseline(t,e,r),2))[0],a=n[1]):s=e,this.bbox.h=o,this.bbox.d=a,this.bbox.w=s},e.prototype.getBaseline=function(t,e,r){var n=2===t.length&&t[0]+t[1]===e,i=this.node.attributes.get("symmetric"),o=m(n?t:[e,0],2),a=o[0],s=o[1],c=m([a+s,0],2),l=c[0],u=c[1];if(i){var h=this.font.params.axis_height;n&&(l=2*Math.max(a-h,s+h)),u=l/2-h}else if(n)u=s;else{var f=m(r.HDW||[.75,.25],2),p=f[0],d=f[1];u=d*(l/(p+d))}return[l-u,u]},e.prototype.remapChars=function(t){if(1==t.length){var e=this.node.parent,r=this.isAccent&&(e===this.node.coreParent()||e.isEmbellished)?"accent":"mo",n=this.font.getRemappedChar(r,t[0]);n&&(t=this.unicodeChars(n))}return t},e;function e(){for(var t=[],e=0;e=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var o,a=r(2),s=r(145),c=r(56),l=(o=s.CommonMpaddedMixin(a.CHTMLWrapper),i(u,o),u.prototype.toCHTML=function(t){var e,r,n=this.standardCHTMLnode(t),i=[],o={},a=v(this.getDimens(),9),s=(a[0],a[1],a[2]),c=a[3],l=a[4],u=a[5],h=a[6],f=a[7],p=a[8];if(u&&(o.width=this.em(s+u)),(c||l)&&(o.margin=this.em(c)+" 0 "+this.em(l)),h+p||f){o.position="relative";var d=this.html("mjx-rbox",{style:{left:this.em(h+p),top:this.em(-f)}});h+p&&this.childNodes[0].getBBox().pwidth&&(this.adaptor.setAttribute(d,"width","full"),this.adaptor.setStyle(d,"left",this.em(h))),i.push(d)}n=this.adaptor.append(n,this.html("mjx-block",{style:o},i));try{for(var m=b(this.childNodes),y=m.next();!y.done;y=m.next())y.value.toCHTML(i[0]||n)}catch(t){e={error:t}}finally{try{y&&!y.done&&(r=m.return)&&r.call(m)}finally{if(e)throw e.error}}},u.kind=c.MmlMpadded.prototype.kind,u.styles={"mjx-mpadded":{display:"inline-block"},"mjx-rbox":{display:"inline-block",position:"relative"}},u);function u(){return null!==o&&o.apply(this,arguments)||this}e.CHTMLmpadded=l},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var 
r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),l=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")},m=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0 mjx-dstrike":{display:"inline-block",left:0,top:0,position:"absolute","border-top":b.SOLID,"transform-origin":"top left"},"mjx-menclose > mjx-ustrike":{display:"inline-block",left:0,bottom:0,position:"absolute","border-top":b.SOLID,"transform-origin":"bottom left"},"mjx-menclose > mjx-hstrike":{"border-top":b.SOLID,position:"absolute",left:0,right:0,bottom:"50%",transform:"translateY("+c.em(b.THICKNESS/2)+")"},"mjx-menclose > mjx-vstrike":{"border-left":b.SOLID,position:"absolute",top:0,bottom:0,right:"50%",transform:"translateX("+c.em(b.THICKNESS/2)+")"},"mjx-menclose > mjx-rbox":{position:"absolute",top:0,bottom:0,right:0,left:0,border:b.SOLID,"border-radius":c.em(b.THICKNESS+b.PADDING)},"mjx-menclose > mjx-cbox":{position:"absolute",top:0,bottom:0,right:0,left:0,border:b.SOLID,"border-radius":"50%"},"mjx-menclose > mjx-arrow":{position:"absolute",left:0,bottom:"50%",height:0,width:0},"mjx-menclose > mjx-arrow > *":{display:"block",position:"absolute","transform-origin":"bottom","border-left":c.em(b.THICKNESS*b.ARROWX)+" solid","border-right":0,"box-sizing":"border-box"},"mjx-menclose > mjx-arrow > mjx-aline":{left:0,top:c.em(-b.THICKNESS/2),right:c.em(b.THICKNESS*(b.ARROWX-1)),height:0,"border-top":c.em(b.THICKNESS)+" solid","border-left":0},"mjx-menclose > mjx-arrow[double] > mjx-aline":{left:c.em(b.THICKNESS*(b.ARROWX-1)),height:0},"mjx-menclose > mjx-arrow > mjx-rthead":{transform:"skewX("+u+"rad)",right:0,bottom:"-1px","border-bottom":"1px solid transparent","border-top":c.em(b.THICKNESS*b.ARROWY)+" solid transparent"},"mjx-menclose > mjx-arrow > mjx-rbhead":{transform:"skewX(-"+u+"rad)","transform-origin":"top",right:0,top:"-1px","border-top":"1px solid transparent","border-bottom":c.em(b.THICKNESS*b.ARROWY)+" solid transparent"},"mjx-menclose > mjx-arrow > mjx-lthead":{transform:"skewX(-"+u+"rad)",left:0,bottom:"-1px","border-left":0,"border-right":c.em(b.THICKNESS*b.ARROWX)+" solid","border-bottom":"1px solid transparent","border-top":c.em(b.THICKNESS*b.ARROWY)+" solid transparent"},"mjx-menclose > mjx-arrow > mjx-lbhead":{transform:"skewX("+u+"rad)","transform-origin":"top",left:0,top:"-1px","border-left":0,"border-right":c.em(b.THICKNESS*b.ARROWX)+" solid","border-top":"1px solid transparent","border-bottom":c.em(b.THICKNESS*b.ARROWY)+" solid transparent"},"mjx-menclose > dbox":{position:"absolute",top:0,bottom:0,left:c.em(-1.5*b.PADDING),width:c.em(3*b.PADDING),border:c.em(b.THICKNESS)+" solid","border-radius":"50%","clip-path":"inset(0 0 0 "+c.em(1.5*b.PADDING)+")","box-sizing":"border-box"}},f.notations=new 
Map([b.Border("top"),b.Border("right"),b.Border("bottom"),b.Border("left"),b.Border2("actuarial","top","right"),b.Border2("madruwb","bottom","right"),b.DiagonalStrike("up",1),b.DiagonalStrike("down",-1),["horizontalstrike",{renderer:b.RenderElement("hstrike","Y"),bbox:function(t){return[0,t.padding,0,t.padding]}}],["verticalstrike",{renderer:b.RenderElement("vstrike","X"),bbox:function(t){return[t.padding,0,t.padding,0]}}],["box",{renderer:function(t,e){t.adaptor.setStyle(e,"border",t.em(t.thickness)+" solid")},bbox:b.fullBBox,border:b.fullBorder,remove:"left right top bottom"}],["roundedbox",{renderer:b.RenderElement("rbox"),bbox:b.fullBBox}],["circle",{renderer:b.RenderElement("cbox"),bbox:b.fullBBox}],["phasorangle",{renderer:function(t,e){var r=t.getBBox(),n=(r.w,r.h),i=r.d,o=m(t.getArgMod(1.75*t.padding,n+i),2),a=o[0],s=o[1],c=t.thickness*Math.sin(a)*.9;t.adaptor.setStyle(e,"border-bottom",t.em(t.thickness)+" solid");var l=t.adjustBorder(t.html("mjx-ustrike",{style:{width:t.em(s),transform:"translateX("+t.em(c)+") rotate("+t.fixed(-a)+"rad)"}}));t.adaptor.append(t.chtml,l)},bbox:function(t){var e=t.padding/2,r=t.thickness;return[2*e,e,e+r,3*e+r]},border:function(t){return[0,0,t.thickness,0]},remove:"bottom"}],b.Arrow("up"),b.Arrow("down"),b.Arrow("left"),b.Arrow("right"),b.Arrow("updown"),b.Arrow("leftright"),b.DiagonalArrow("updiagonal"),b.DiagonalArrow("northeast"),b.DiagonalArrow("southeast"),b.DiagonalArrow("northwest"),b.DiagonalArrow("southwest"),b.DiagonalArrow("northeastsouthwest"),b.DiagonalArrow("northwestsoutheast"),["longdiv",{renderer:function(t,e){var r=t.adaptor;r.setStyle(e,"border-top",t.em(t.thickness)+" solid");var n=r.append(t.chtml,t.html("dbox")),i=t.thickness,o=t.padding;i!==b.THICKNESS&&r.setStyle(n,"border-width",t.em(i)),o!==b.PADDING&&(r.setStyle(n,"left",t.em(-1.5*o)),r.setStyle(n,"width",t.em(3*o)),r.setStyle(n,"clip-path","inset(0 0 0 "+t.em(1.5*o)+")"))},bbox:function(t){var e=t.padding,r=t.thickness;return[e+r,e,e,2*e+r/2]}}],["radical",{renderer:function(e,t){e.msqrt.toCHTML(t);var r=e.sqrtTRBL();e.adaptor.setStyle(e.msqrt.chtml,"margin",r.map(function(t){return e.em(-t)}).join(" "))},init:function(t){t.msqrt=t.createMsqrt(t.childNodes[0])},bbox:function(t){return t.sqrtTRBL()},renderChild:!0}]]),f);function f(){return null!==l&&l.apply(this,arguments)||this}e.CHTMLmenclose=h},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),f=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var c=r(36),l=r(10);e.CommonMencloseMixin=function(t){return i(e,n=t),e.prototype.getParameters=function(){var t=this.node.attributes,e=t.get("data-padding");void 0!==e&&(this.padding=this.length2em(e,c.PADDING));var r=t.get("data-thickness");void 0!==r&&(this.thickness=this.length2em(r,c.THICKNESS));var n=t.get("data-arrowhead");if(void 0!==n){var 
i=f(l.split(n),3),o=i[0],a=i[1],s=i[2];this.arrowhead={x:o?parseFloat(o):c.ARROWX,y:a?parseFloat(a):c.ARROWY,dx:s?parseFloat(s):c.ARROWDX}}},e.prototype.getNotations=function(){var e,t,r=this.constructor.notations;try{for(var n=h(l.split(this.node.attributes.get("notation"))),i=n.next();!i.done;i=n.next()){var o=i.value,a=r.get(o);a&&(this.notations[o]=a).renderChild&&(this.renderChild=a.renderer)}}catch(t){e={error:t}}finally{try{i&&!i.done&&(t=n.return)&&t.call(n)}finally{if(e)throw e.error}}},e.prototype.removeRedundantNotations=function(){var e,t,r,n;try{for(var i=h(Object.keys(this.notations)),o=i.next();!o.done;o=i.next()){var a=o.value;if(this.notations[a]){var s=this.notations[a].remove||"";try{for(var c=(r=void 0,h(s.split(/ /))),l=c.next();!l.done;l=c.next()){var u=l.value;delete this.notations[u]}}catch(t){r={error:t}}finally{try{l&&!l.done&&(n=c.return)&&n.call(c)}finally{if(r)throw r.error}}}}}catch(t){e={error:t}}finally{try{o&&!o.done&&(t=i.return)&&t.call(i)}finally{if(e)throw e.error}}},e.prototype.initializeNotations=function(){var e,t;try{for(var r=h(Object.keys(this.notations)),n=r.next();!n.done;n=r.next()){var i=n.value,o=this.notations[i].init;o&&o(this)}}catch(t){e={error:t}}finally{try{n&&!n.done&&(t=r.return)&&t.call(r)}finally{if(e)throw e.error}}},e.prototype.computeBBox=function(t,e){void 0===e&&(e=!1);var r=f(this.getBBoxExtenders(),4),n=r[0],i=r[1],o=r[2],a=r[3],s=this.childNodes[0].getBBox();t.combine(s,a,0),t.h+=n,t.d+=o,t.w+=i,this.setChildPWidths(e)},e.prototype.getBBoxExtenders=function(){var e,t,r=[0,0,0,0];try{for(var n=h(Object.keys(this.notations)),i=n.next();!i.done;i=n.next()){var o=i.value;this.maximizeEntries(r,this.notations[o].bbox(this))}}catch(t){e={error:t}}finally{try{i&&!i.done&&(t=n.return)&&t.call(n)}finally{if(e)throw e.error}}return r},e.prototype.getPadding=function(){var e,t,r=[0,0,0,0],n=[0,0,0,0];try{for(var i=h(Object.keys(this.notations)),o=i.next();!o.done;o=i.next()){var a=o.value;this.maximizeEntries(r,this.notations[a].bbox(this));var s=this.notations[a].border;s&&this.maximizeEntries(n,s(this))}}catch(t){e={error:t}}finally{try{o&&!o.done&&(t=i.return)&&t.call(i)}finally{if(e)throw e.error}}return[0,1,2,3].map(function(t){return r[t]-n[t]})},e.prototype.maximizeEntries=function(t,e){for(var r=0;r=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var o,a=r(2),s=r(73),c=r(73),u=r(52),h=(o=s.CommonMrowMixin(a.CHTMLWrapper),i(f,o),f.prototype.toCHTML=function(t){var e,r,n=this.node.isInferred?this.chtml=t:this.standardCHTMLnode(t),i=!1;try{for(var o=l(this.childNodes),a=o.next();!a.done;a=o.next()){var s=a.value;s.toCHTML(n),s.bbox.w<0&&(i=!0)}}catch(t){e={error:t}}finally{try{a&&!a.done&&(r=o.return)&&r.call(o)}finally{if(e)throw e.error}}if(i){var c=this.getBBox().w;c&&(this.adaptor.setStyle(n,"width",this.em(Math.max(0,c))),c<0&&this.adaptor.setStyle(n,"marginRight",this.em(c)))}},f.kind=u.MmlMrow.prototype.kind,f);function f(){return null!==o&&o.apply(this,arguments)||this}e.CHTMLmrow=h;var p,d=(p=c.CommonInferredMrowMixin(h),i(m,p),m.kind=u.MmlInferredMrow.prototype.kind,m);function m(){return null!==p&&p.apply(this,arguments)||this}e.CHTMLinferredMrow=d},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in 
e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)});Object.defineProperty(e,"__esModule",{value:!0});var o,a=r(2),s=r(151),c=r(57),l=(o=s.CommonMfencedMixin(a.CHTMLWrapper),i(u,o),u.prototype.toCHTML=function(t){var e=this.standardCHTMLnode(t);this.mrow.toCHTML(e)},u.kind=c.MmlMfenced.prototype.kind,u);function u(){return null!==o&&o.apply(this,arguments)||this}e.CHTMLmfenced=l},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),o=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0}),e.CommonMfencedMixin=function(t){return i(e,n=t),e.prototype.createMrow=function(){var t=this.node.factory.create("inferredMrow");t.inheritAttributesFrom(this.node),this.mrow=this.wrap(t),this.mrow.parent=this},e.prototype.addMrowChildren=function(){var e,t,r=this.node,n=this.mrow;this.addMo(r.open),this.childNodes.length&&n.childNodes.push(this.childNodes[0]);var i=0;try{for(var o=c(this.childNodes.slice(1)),a=o.next();!a.done;a=o.next()){var s=a.value;this.addMo(r.separators[i++]),n.childNodes.push(s)}}catch(t){e={error:t}}finally{try{a&&!a.done&&(t=o.return)&&t.call(o)}finally{if(e)throw e.error}}this.addMo(r.close),n.stretchChildren()},e.prototype.addMo=function(t){if(t){var e=this.wrap(t);this.mrow.childNodes.push(e),e.parent=this.mrow}},e.prototype.computeBBox=function(t,e){void 0===e&&(e=!1),t.updateFrom(this.mrow.getBBox()),this.setChildPWidths(e)},e;function e(){for(var t=[],e=0;e *":{"font-size":"2000%"},"mjx-dbox":{display:"block","font-size":"5%"},"mjx-num":{display:"block","text-align":"center"},"mjx-den":{display:"block","text-align":"center"},"mjx-mfrac[bevelled] > mjx-num":{display:"inline-block"},"mjx-mfrac[bevelled] > mjx-den":{display:"inline-block"},'mjx-den[align="right"], mjx-num[align="right"]':{"text-align":"right"},'mjx-den[align="left"], mjx-num[align="left"]':{"text-align":"left"},"mjx-nstrut":{display:"inline-block",height:".054em",width:0,"vertical-align":"-.054em"},'mjx-nstrut[type="d"]':{height:".217em","vertical-align":"-.217em"},"mjx-dstrut":{display:"inline-block",height:".505em",width:0},'mjx-dstrut[type="d"]':{height:".726em"},"mjx-line":{display:"block","box-sizing":"border-box","min-height":"1px",height:".06em","border-top":".06em solid",margin:".06em -.1em",overflow:"hidden"},'mjx-line[type="d"]':{margin:".18em -.1em"}},u);function u(){return null!==o&&o.apply(this,arguments)||this}e.CHTMLmfrac=l},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),l=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var 
n,i,o=r.call(t),a=[];try{for(;(void 0===e||0this.surdH?(t.h+t.d-(this.surdH-e))/2:e+r/4]},e.prototype.getRootDimens=function(t){return[0,0,0,0]},e;function e(){for(var t=[],e=0;e=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var a,s=r(2),c=r(158),l=(a=c.CommonScriptbaseMixin(s.CHTMLWrapper),i(h,a),h.prototype.toCHTML=function(t){this.chtml=this.standardCHTMLnode(t);var e=o(this.getOffset(this.baseChild.getBBox(),this.script.getBBox()),2),r=e[0],n=e[1],i={"vertical-align":this.em(n)};r&&(i["margin-left"]=this.em(r)),this.baseChild.toCHTML(this.chtml),this.script.toCHTML(this.adaptor.append(this.chtml,this.html("mjx-script",{style:i})))},h.prototype.setDeltaW=function(t,e){for(var r=0;r=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var a=1.5;e.CommonScriptbaseMixin=function(t){var e,i;return o(r,i=t),Object.defineProperty(r.prototype,"baseChild",{get:function(){return this.childNodes[this.node.base]},enumerable:!0,configurable:!0}),Object.defineProperty(r.prototype,"script",{get:function(){return this.childNodes[1]},enumerable:!0,configurable:!0}),r.prototype.computeBBox=function(t,e){void 0===e&&(e=!1);var r=this.baseChild.getBBox(),n=this.script.getBBox(),i=s(this.getOffset(r,n),2),o=i[0],a=i[1];t.append(r),t.combine(n,t.w+o,a),t.w+=this.font.params.scriptspace,t.clean(),this.setChildPWidths(e)},r.prototype.coreIC=function(){var t=this.baseCore.getBBox();return t.ic?1.2*t.ic+.05:0},r.prototype.isCharBase=function(){var t=this.baseChild;return(t.node.isKind("mstyle")||t.node.isKind("mrow"))&&1===t.childNodes.length&&(t=t.childNodes[0]),(t.node.isKind("mo")||t.node.isKind("mi")||t.node.isKind("mn"))&&1===t.bbox.rscale&&1===t.getText().length&&!t.node.attributes.get("largeop")},r.prototype.getOffset=function(t,e){return[0,0]},r.prototype.getV=function(t,e){var r=this.font.params,n=this.length2em(this.node.attributes.get("subscriptshift"),r.sub1);return Math.max(this.isCharBase()?0:t.d+r.sub_drop*e.rscale,n,e.h*e.rscale-.8*r.x_height)},r.prototype.getU=function(t,e){var r=this.font.params,n=this.node.attributes.getList("displaystyle","texprimestyle","superscriptshift"),i=n.displaystyle?r.sup1:n.texprimestyle?r.sup3:r.sup2,o=this.length2em(n.superscriptshift,i);return Math.max(this.isCharBase()?0:t.h-r.sup_drop*e.rscale,o,e.d*e.rscale+.25*r.x_height)},r.prototype.hasMovableLimits=function(){return!this.node.attributes.get("displaystyle")&&(this.node.getProperty("movablelimits")||this.node.attributes.get("movablelimits")||this.baseChild.coreMO().node.attributes.get("movablelimits"))},r.prototype.getOverKU=function(t,e){var r=this.node.attributes.get("accent"),n=this.font.params,i=e.d*e.rscale,o=(r?n.rule_thickness:Math.max(n.big_op_spacing1,n.big_op_spacing3-Math.max(0,i)))-(this.baseChild.node.isKind("munderover")?.1:0);return[o,t.h*t.rscale+o+i]},r.prototype.getUnderKV=function(t,e){var r=this.node.attributes.get("accentunder"),n=this.font.params,i=e.h*e.rscale,o=(r?n.rule_thickness:Math.max(n.big_op_spacing2,n.big_op_spacing4-i))-(this.baseChild.node.isKind("munderover")?.1:0);return[o,-(t.d*t.rscale+o+i)]},r.prototype.getDeltaW=function(t,e){var r,n,i,o;void 0===e&&(e=[0,0,0]);var a=this.node.attributes.get("align"),s=t.map(function(t){return t.w*t.rscale}),c=Math.max.apply(Math,y(s)),l=[],u=0;try{for(var 
h=x(s.keys()),f=h.next();!f.done;f=h.next())l[m=f.value]=("center"===a?(c-s[m])/2:"right"===a?c-s[m]:0)+e[m],l[m] mjx-row":{"text-align":"left"},"mjx-under":{"padding-bottom":".1em"}},f);function f(){return null!==c&&c.apply(this,arguments)||this}e.CHTMLmunder=h;var d,m=(d=s.CommonMoverMixin(o.CHTMLmsup),i(y,d),y.prototype.toCHTML=function(t){if(this.hasMovableLimits())return d.prototype.toCHTML.call(this,t),void this.adaptor.setAttribute(this.chtml,"limits","false");this.chtml=this.standardCHTMLnode(t);var e=this.adaptor.append(this.chtml,this.html("mjx-over")),r=this.adaptor.append(this.chtml,this.html("mjx-base"));this.script.toCHTML(e),this.baseChild.toCHTML(r);var n=this.script.getBBox(),i=this.baseChild.getBBox(),o=p(this.getOverKU(i,n),2),a=o[0],s=(o[1],this.getDelta());this.adaptor.setStyle(e,"paddingBottom",this.em(a)),this.setDeltaW([r,e],this.getDeltaW([i,n],[0,s])),this.adjustOverDepth(e,n)},y.kind=u.MmlMover.prototype.kind,y.useIC=!0,y.styles={'mjx-mover:not([limits="false"])':{"padding-top":".1em"},'mjx-mover:not([limits="false"]) > *':{display:"block","text-align":"left"}},y);function y(){return null!==d&&d.apply(this,arguments)||this}e.CHTMLmover=m;var v,b=(v=l.CommonMunderoverMixin(o.CHTMLmsubsup),i(g,v),g.prototype.toCHTML=function(t){if(this.hasMovableLimits())return v.prototype.toCHTML.call(this,t),void this.adaptor.setAttribute(this.chtml,"limits","false");this.chtml=this.standardCHTMLnode(t);var e=this.adaptor.append(this.chtml,this.html("mjx-over")),r=this.adaptor.append(this.adaptor.append(this.chtml,this.html("mjx-box")),this.html("mjx-munder")),n=this.adaptor.append(this.adaptor.append(r,this.html("mjx-row")),this.html("mjx-base")),i=this.adaptor.append(this.adaptor.append(r,this.html("mjx-row")),this.html("mjx-under"));this.overChild.toCHTML(e),this.baseChild.toCHTML(n),this.underChild.toCHTML(i);var o=this.overChild.getBBox(),a=this.baseChild.getBBox(),s=this.underChild.getBBox(),c=p(this.getOverKU(a,o),2),l=c[0],u=(c[1],p(this.getUnderKV(a,s),2)),h=u[0],f=(u[1],this.getDelta());this.adaptor.setStyle(e,"paddingBottom",this.em(l)),this.adaptor.setStyle(i,"paddingTop",this.em(h)),this.setDeltaW([n,i,e],this.getDeltaW([a,s,o],[0,-f,f])),this.adjustOverDepth(e,o),this.adjustUnderDepth(i,s)},g.kind=u.MmlMunderover.prototype.kind,g.useIC=!0,g.styles={'mjx-munderover:not([limits="false"])':{"padding-top":".1em"},'mjx-munderover:not([limits="false"]) > *':{display:"block"}},g);function g(){return null!==v&&v.apply(this,arguments)||this}e.CHTMLmunderover=b},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),c=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0 mjx-row > mjx-cell":{"text-align":"right"}},h);function h(){return null!==o&&o.apply(this,arguments)||this}e.CHTMLmmultiscripts=u},function(t,s,e){"use strict";var n,r=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new 
r)}),d=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(s,"__esModule",{value:!0});var i=e(16);s.NextScript={base:"subList",subList:"supList",supList:"subList",psubList:"psupList",psupList:"psubList"},s.ScriptNames=["sup","sup","psup","psub"],s.CommonMmultiscriptsMixin=function(t){return r(e,c=t),e.prototype.combinePrePost=function(t,e){var r=new i.BBox(t);return r.combine(e,0,0),r},e.prototype.computeBBox=function(t,e){void 0===e&&(e=!1);var r=this.font.params.scriptspace,n=this.getScriptData(),i=this.combinePrePost(n.sub,n.psub),o=this.combinePrePost(n.sup,n.psup),a=d(this.getUVQ(n.base,i,o),2),s=a[0],c=a[1];if(t.empty(),n.numPrescripts&&(t.combine(n.psup,r,s),t.combine(n.psub,r,c)),t.append(n.base),n.numScripts){var l=t.w;t.combine(n.sup,l,s),t.combine(n.sub,l,c),t.w+=r}t.clean(),this.setChildPWidths(e)},e.prototype.getScriptData=function(){if(this.scriptData)return this.scriptData;var t=this.scriptData={base:null,sub:i.BBox.empty(),sup:i.BBox.empty(),psub:i.BBox.empty(),psup:i.BBox.empty(),numPrescripts:0,numScripts:0},e=this.getScriptBBoxLists();return this.combineBBoxLists(t.sub,t.sup,e.subList,e.supList),this.combineBBoxLists(t.psub,t.psup,e.psubList,e.psupList),this.scriptData.base=e.base[0],this.scriptData.numPrescripts=e.psubList.length,this.scriptData.numScripts=e.subList.length,this.scriptData},e.prototype.getScriptBBoxLists=function(){var e,t,r={base:[],subList:[],supList:[],psubList:[],psupList:[]},n="base";try{for(var i=l(this.childNodes),o=i.next();!o.done;o=i.next()){var a=o.value;n=a.node.isKind("mprescripts")?"psubList":(r[n].push(a.getBBox()),s.NextScript[n])}}catch(t){e={error:t}}finally{try{o&&!o.done&&(t=i.return)&&t.call(i)}finally{if(e)throw e.error}}return this.firstPrescript=r.subList.length+r.supList.length+2,this.padLists(r.subList,r.supList),this.padLists(r.psubList,r.psupList),r},e.prototype.padLists=function(t,e){t.length>e.length&&e.push(i.BBox.empty())},e.prototype.combineBBoxLists=function(t,e,r,n){for(var i=0;it.h&&(t.h=s),c>t.d&&(t.d=c),h>e.h&&(e.h=h),f>e.d&&(e.d=f)}},e.prototype.getScaledWHD=function(t){var e=t.w,r=t.h,n=t.d,i=t.rscale;return[e*i,r*i,n*i]},e.prototype.getUVQ=function(t,e,r){var n;if(!this.UVQ){var i=d([0,0,0],3),o=i[0],a=i[1],s=i[2];0===e.h&&0===e.d?o=this.getU(t,r):0===r.h&&0===r.d?o=-this.getV(t,e):(o=(n=d(c.prototype.getUVQ.call(this,t,e,r),3))[0],a=n[1],s=n[2]),this.UVQ=[o,a,s]}return this.UVQ},e;function e(){var t=null!==c&&c.apply(this,arguments)||this;return t.scriptData=null,t.firstPrescript=0,t}var c}},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),y=this&&this.__values||function(t){var e="function"==typeof Symbol&&Symbol.iterator,r=e&&t[e],n=0;if(r)return r.call(t);if(t&&"number"==typeof t.length)return{next:function(){return t&&n>=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")},u=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return 
t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0 mjx-itable":{"vertical-align":"middle","text-align":"left","box-sizing":"border-box"},"mjx-labels > mjx-itable":{position:"absolute",top:0},'mjx-mtable[justify="left"]':{"text-align":"left"},'mjx-mtable[justify="right"]':{"text-align":"right"},'mjx-mtable[justify="left"][side="left"]':{"padding-right":"0 ! important"},'mjx-mtable[justify="left"][side="right"]':{"padding-left":"0 ! important"},'mjx-mtable[justify="right"][side="left"]':{"padding-right":"0 ! important"},'mjx-mtable[justify="right"][side="right"]':{"padding-left":"0 ! important"},"mjx-mtable[align]":{"vertical-align":"baseline"},'mjx-mtable[align="top"] > mjx-table':{"vertical-align":"top"},'mjx-mtable[align="bottom"] > mjx-table':{"vertical-align":"bottom"},'mjx-mtable[align="center"] > mjx-table':{"vertical-align":"middle"},'mjx-mtable[align="baseline"] > mjx-table':{"vertical-align":"middle"}},f);function f(t,e,r){void 0===r&&(r=null);var n=o.call(this,t,e,r)||this;return n.itable=n.html("mjx-itable"),n.labels=n.html("mjx-itable"),n}e.CHTMLmtable=l},function(t,e,r){"use strict";var n,o=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),y=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var s=r(16),v=r(10),b=r(104);e.CommonMtableMixin=function(t){return o(e,i=t),Object.defineProperty(e.prototype,"tableRows",{get:function(){return this.childNodes},enumerable:!0,configurable:!0}),e.prototype.findContainer=function(){for(var t=this,e=t.parent;e&&(e.node.notParent||e.node.isKind("mrow"));)e=(t=e).parent;this.container=e,this.containerI=t.node.childPosition()},e.prototype.getPercentageWidth=function(){if(this.hasLabels)this.bbox.pwidth=s.BBox.fullWidth;else{var t=this.node.attributes.get("width");v.isPercent(t)&&(this.bbox.pwidth=t)}},e.prototype.stretchRows=function(){for(var t=this.node.attributes.get("equalrows"),e=t?this.getEqualRowHeight():0,r=t?this.getTableData():{H:[0],D:[0]},n=r.H,i=r.D,o=this.tableRows,a=0;an[r]&&(n[r]=s),c>i[r]&&(i[r]=c),o&&l>o[e]&&(o[e]=l)},e.prototype.recordPWidthCell=function(t,e){t.childNodes[0]&&t.childNodes[0].getBBox().pwidth&&this.pwidthCells.push([t,e])},e.prototype.computeBBox=function(t,e){void 0===e&&(e=!1);var r,n,i=this.getTableData(),o=i.H,a=i.D;if(this.node.attributes.get("equalrows")){var s=this.getEqualRowHeight();r=b.sum([].concat(this.rLines,this.rSpace))+s*this.numRows}else r=b.sum(o.concat(a,this.rLines,this.rSpace));r+=2*(this.fLine+this.fSpace[1]);var c=this.getComputedWidths();n=b.sum(c.concat(this.cLines,this.cSpace))+2*(this.fLine+this.fSpace[0]);var l=this.node.attributes.get("width");"auto"!==l&&(n=Math.max(this.length2em(l,0)+2*this.fLine,n));var u=y(this.getBBoxHD(r),2),h=u[0],f=u[1];t.h=h,t.d=f,t.w=n;var p=y(this.getBBoxLR(),2),d=p[0],m=p[1];t.L=d,t.R=m,v.isPercent(l)||this.setColumnPWidths()},e.prototype.setChildPWidths=function(t,e,r){var n=this.node.attributes.get("width");if(v.isPercent(n)){this.hasLabels||(this.bbox.pwidth="",this.container.bbox.pwidth="");var 
i=this.bbox,o=i.w,a=i.L,s=i.R,c=Math.max(o,this.length2em(n,Math.max(e,a+o+s))),l=this.node.attributes.get("equalcolumns")?Array(this.numCols).fill(this.percent(1/Math.max(1,this.numCols))):this.getColumnAttributes("columnwidth",0);this.cWidths=this.getColumnWidthsFixed(l,c);var u=this.getComputedWidths();return this.pWidth=b.sum(u.concat(this.cLines,this.cSpace))+2*(this.fLine+this.fSpace[0]),this.isTop&&(this.bbox.w=this.pWidth),this.setColumnPWidths(),this.pWidth!==o&&this.parent.invalidateBBox(),this.pWidth!==o}},e.prototype.setColumnPWidths=function(){var e,t,r=this.cWidths;try{for(var n=x(this.pwidthCells),i=n.next();!i.done;i=n.next()){var o=y(i.value,2),a=o[0],s=o[1];a.setChildPWidths(!1,r[s])&&(a.invalidateBBox(),a.getBBox())}}catch(t){e={error:t}}finally{try{i&&!i.done&&(t=n.return)&&t.call(n)}finally{if(e)throw e.error}}},e.prototype.getBBoxHD=function(t){var e=y(this.getAlignmentRow(),2),r=e[0],n=e[1];if(null===n){var i=this.font.params.axis_height,o=t/2;return{top:[0,t],center:[o,o],bottom:[t,0],baseline:[o,o],axis:[o+i,o-i]}[r]||[o,o]}var a=this.getVerticalPosition(n,r);return[a,t-a]},e.prototype.getBBoxLR=function(){if(this.hasLabels){var t=this.node.attributes.get("side"),e=y(this.getPadAlignShift(t),3),r=e[0],n=e[1];return e[2],"center"===n?[r,r]:"left"===t?[r,0]:[0,r]}return[0,0]},e.prototype.getPadAlignShift=function(t){var e=this.getTableData().L+this.length2em(this.node.attributes.get("minlabelspacing")),r=y(null==this.styles?["",""]:[this.styles.get("padding-left"),this.styles.get("padding-right")],2),n=r[0],i=r[1];(n||i)&&(e=Math.max(e,this.length2em(n||"0"),this.length2em(i||"0")));var o=y(this.getAlignShift(),2),a=o[0],s=o[1];return a===t&&(s="left"===t?Math.max(e,s)-e:Math.min(-e,s)+e),[e,a,s]},e.prototype.getAlignShift=function(){return this.isTop?i.prototype.getAlignShift.call(this):[this.container.getChildAlign(this.containerI),0]},e.prototype.getWidth=function(){return this.pWidth||this.getBBox().w},e.prototype.getEqualRowHeight=function(){var t=this.getTableData(),e=t.H,r=t.D,n=Array.from(e.keys()).map(function(t){return e[t]+r[t]});return Math.max.apply(Math,n)},e.prototype.getComputedWidths=function(){var e=this,r=this.getTableData().W,t=Array.from(r.keys()).map(function(t){return"number"==typeof e.cWidths[t]?e.cWidths[t]:r[t]});return this.node.attributes.get("equalcolumns")&&(t=Array(t.length).fill(b.max(t))),t},e.prototype.getColumnWidths=function(){var t=this.node.attributes.get("width");if(this.node.attributes.get("equalcolumns"))return this.getEqualColumns(t);var e=this.getColumnAttributes("columnwidth",0);return"auto"===t?this.getColumnWidthsAuto(e):v.isPercent(t)?this.getColumnWidthsPercent(e,t):this.getColumnWidthsFixed(e,this.length2em(t))},e.prototype.getEqualColumns=function(t){var e,r=Math.max(1,this.numCols);if("auto"===t){var n=this.getTableData().W;e=b.max(n)}else if(v.isPercent(t))e=this.percent(1/r);else{var i=b.sum([].concat(this.cLines,this.cSpace))+2*this.fSpace[0];e=Math.max(0,this.length2em(t)-i)/r}return Array(this.numCols).fill(e)},e.prototype.getColumnWidthsAuto=function(t){var e=this;return t.map(function(t){return"auto"===t||"fit"===t?null:v.isPercent(t)?t:e.length2em(t)})},e.prototype.getColumnWidthsPercent=function(r,t){var n=this,i=0<=r.indexOf("fit"),o=(i?this.getTableData():{W:null}).W;return Array.from(r.keys()).map(function(t){var e=r[t];return"fit"===e?null:"auto"===e?i?o[t]:null:v.isPercent(e)?e:n.length2em(e)})},e.prototype.getColumnWidthsFixed=function(r,n){var 
i=this,t=Array.from(r.keys()),o=t.filter(function(t){return"fit"===r[t]}),e=t.filter(function(t){return"auto"===r[t]}),a=o.length||e.length,s=(a?this.getTableData():{W:null}).W,c=n-b.sum([].concat(this.cLines,this.cSpace))-2*this.fSpace[0],l=c;t.forEach(function(t){var e=r[t];l-="fit"===e||"auto"===e?s[t]:i.length2em(e,n)});var u=a&&0this.numRows?null:n-1]},e.prototype.getColumnAttributes=function(t,e){void 0===e&&(e=1);var r=this.numCols-e,n=this.getAttributeArray(t);if(0!==n.length){for(;n.lengthr&&n.splice(r),n}},e.prototype.getRowAttributes=function(t,e){void 0===e&&(e=1);var r=this.numRows-e,n=this.getAttributeArray(t);if(0!==n.length){for(;n.lengthr&&n.splice(r),n}},e.prototype.getAttributeArray=function(t){var e=this.node.attributes.get(t);return e?v.split(e):[this.node.attributes.getDefault(t)]},e.prototype.addEm=function(t,e){var r=this;if(void 0===e&&(e=1),t)return t.map(function(t){return r.em(t/e)})},e.prototype.convertLengths=function(t){var e=this;if(t)return t.map(function(t){return e.length2em(t)})},e;function e(){for(var t=[],e=0;e mjx-mtd':{"vertical-align":"top"},'mjx-mtr[rowalign="center"] > mjx-mtd':{"vertical-align":"middle"},'mjx-mtr[rowalign="bottom"] > mjx-mtd':{"vertical-align":"bottom"},'mjx-mtr[rowalign="baseline"] > mjx-mtd':{"vertical-align":"baseline"},'mjx-mtr[rowalign="axis"] > mjx-mtd':{"vertical-align":".25em"}},h);function h(){return null!==o&&o.apply(this,arguments)||this}e.CHTMLmtr=u;var f,p=(f=c.CommonMlabeledtrMixin(u),i(d,f),d.prototype.toCHTML=function(t){f.prototype.toCHTML.call(this,t);var e=this.adaptor.firstChild(this.chtml);if(e){this.adaptor.remove(e);var r=this.node.attributes.get("rowalign"),n="baseline"!==r&&"axis"!==r?{rowalign:r}:{},i=this.html("mjx-mtr",n,[e]);this.adaptor.append(this.parent.labels,i)}},d.kind=l.MmlMlabeledtr.prototype.kind,d.styles={"mjx-mlabeledtr":{display:"table-row"},'mjx-mlabeledtr[rowalign="top"] > mjx-mtd':{"vertical-align":"top"},'mjx-mlabeledtr[rowalign="center"] > mjx-mtd':{"vertical-align":"middle"},'mjx-mlabeledtr[rowalign="bottom"] > mjx-mtd':{"vertical-align":"bottom"},'mjx-mlabeledtr[rowalign="baseline"] > mjx-mtd':{"vertical-align":"baseline"},'mjx-mlabeledtr[rowalign="axis"] > mjx-mtd':{"vertical-align":".25em"}},d);function d(){return null!==f&&f.apply(this,arguments)||this}e.CHTMLmlabeledtr=p},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)});Object.defineProperty(e,"__esModule",{value:!0});var o,a=r(2),s=r(166),c=r(64),l=(o=s.CommonMtdMixin(a.CHTMLWrapper),i(u,o),u.prototype.toCHTML=function(t){o.prototype.toCHTML.call(this,t);var e=this.node.attributes.get("rowalign"),r=this.node.attributes.get("columnalign");e!==this.parent.node.attributes.get("rowalign")&&this.adaptor.setAttribute(this.chtml,"rowalign",e),"center"===r||"mlabeledtr"===this.parent.kind&&this===this.parent.childNodes[0]&&r===this.parent.parent.node.attributes.get("side")||this.adaptor.setStyle(this.chtml,"textAlign",r),this.adaptor.append(this.chtml,this.html("mjx-tstrut"))},u.kind=c.MmlMtd.prototype.kind,u.styles={"mjx-mtd":{display:"table-cell","text-align":"center",padding:".215em .4em"},"mjx-mtd:first-child":{"padding-left":0},"mjx-mtd:last-child":{"padding-right":0},"mjx-mtable > * > mjx-itable > *:first-child > 
mjx-mtd":{"padding-top":0},"mjx-mtable > * > mjx-itable > *:last-child > mjx-mtd":{"padding-bottom":0},"mjx-tstrut":{display:"inline-block",height:"1em","vertical-align":"-.25em"},'mjx-labels[align="left"] > mjx-mtr > mjx-mtd':{"text-align":"left"},'mjx-labels[align="right"] > mjx-mtr > mjx-mtd':{"text-align":"right"},'mjx-mtr mjx-mtd[rowalign="top"], mjx-mlabeledtr mjx-mtd[rowalign="top"]':{"vertical-align":"top"},'mjx-mtr mjx-mtd[rowalign="center"], mjx-mlabeledtr mjx-mtd[rowalign="center"]':{"vertical-align":"middle"},'mjx-mtr mjx-mtd[rowalign="bottom"], mjx-mlabeledtr mjx-mtd[rowalign="bottom"]':{"vertical-align":"bottom"},'mjx-mtr mjx-mtd[rowalign="baseline"], mjx-mlabeledtr mjx-mtd[rowalign="baseline"]':{"vertical-align":"baseline"},'mjx-mtr mjx-mtd[rowalign="axis"], mjx-mlabeledtr mjx-mtd[rowalign="axis"]':{"vertical-align":".25em"}},u);function u(){return null!==o&&o.apply(this,arguments)||this}e.CHTMLmtd=l},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)});Object.defineProperty(e,"__esModule",{value:!0}),e.CommonMtdMixin=function(t){return i(e,r=t),Object.defineProperty(e.prototype,"fixesPWidth",{get:function(){return!1},enumerable:!0,configurable:!0}),e.prototype.invalidateBBox=function(){this.bboxComputed=!1},e.prototype.getWrapWidth=function(t){var e=this.parent.parent,r=this.parent,n=this.node.childPosition()-(r.labeled?1:0);return"number"==typeof e.cWidths[n]?e.cWidths[n]:e.getTableData().W[n]},e.prototype.getChildAlign=function(t){return this.node.attributes.get("columnalign")},e;function e(){return null!==r&&r.apply(this,arguments)||this}var r}},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)});Object.defineProperty(e,"__esModule",{value:!0});var o,a=r(2),s=r(76),c=r(76),l=r(59),u=(o=s.CommonMactionMixin(a.CHTMLWrapper),i(h,o),h.prototype.toCHTML=function(t){var e=this.standardCHTMLnode(t);this.selected.toCHTML(e),this.action(this,this.data)},h.prototype.setEventHandler=function(t,e){this.chtml.addEventListener(t,e)},h.kind=l.MmlMaction.prototype.kind,h.styles={"mjx-maction":{position:"relative"},"mjx-maction > mjx-tool":{display:"none",position:"absolute",bottom:0,right:0,width:0,height:0,"z-index":500},"mjx-tool > mjx-tip":{display:"inline-block",padding:".2em",border:"1px solid #888","font-size":"70%","background-color":"#F8F8F8",color:"black","box-shadow":"2px 2px 5px #AAAAAA"},"mjx-maction[toggle]":{cursor:"pointer"},"mjx-status":{display:"block",position:"fixed",left:"1em",bottom:"1em","min-width":"25%",padding:".2em .4em",border:"1px solid #888","font-size":"90%","background-color":"#F8F8F8",color:"black"}},h.actions=new Map([["toggle",[function(t,e){t.adaptor.setAttribute(t.chtml,"toggle",t.node.attributes.get("selection"));var 
r=t.factory.jax.math,n=t.factory.jax.document,i=t.node;t.setEventHandler("click",function(t){r.start.node||(r.start.node=r.end.node=r.typesetRoot,r.start.n=r.end.n=0),i.nextToggleSelection(),r.rerender(n),t.stopPropagation()})},{}]],["tooltip",[function(r,n){var t=r.childNodes[1];if(t)if(t.node.isKind("mtext")){var e=t.node.getText();r.adaptor.setAttribute(r.chtml,"title",e)}else{var i=r.adaptor,o=i.append(r.chtml,r.html("mjx-tool",{style:{bottom:r.em(-r.dy),right:r.em(-r.dx)}},[r.html("mjx-tip")]));t.toCHTML(i.firstChild(o)),r.setEventHandler("mouseover",function(t){n.stopTimers(r,n);var e=setTimeout(function(){return i.setStyle(o,"display","block")},n.postDelay);n.hoverTimer.set(r,e),t.stopPropagation()}),r.setEventHandler("mouseout",function(t){n.stopTimers(r,n);var e=setTimeout(function(){return i.setStyle(o,"display","")},n.clearDelay);n.clearTimer.set(r,e),t.stopPropagation()})}},c.TooltipData]],["statusline",[function(r,n){var t=r.childNodes[1];if(t&&t.node.isKind("mtext")){var i=r.adaptor,o=t.node.getText();i.setAttribute(r.chtml,"statusline",o),r.setEventHandler("mouseover",function(t){if(null===n.status){var e=i.body(i.document);n.status=i.append(e,r.html("mjx-status",{},[r.text(o)]))}t.stopPropagation()}),r.setEventHandler("mouseout",function(t){n.status&&(i.remove(n.status),n.status=null),t.stopPropagation()})}},{status:null}]]]),h);function h(){return null!==o&&o.apply(this,arguments)||this}e.CHTMLmaction=u},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)});Object.defineProperty(e,"__esModule",{value:!0});var o,a=r(2),s=r(169),c=r(65),l=(o=s.CommonMglyphMixin(a.CHTMLWrapper),i(u,o),u.prototype.toCHTML=function(t){var e=this.standardCHTMLnode(t),r=this.node.attributes.getList("src","alt"),n=r.src,i=r.alt,o={width:this.em(this.width),height:this.em(this.height)};this.voffset&&(o.verticalAlign=this.em(-this.voffset));var a=this.html("img",{src:n,style:o,alt:i,title:i});this.adaptor.append(e,a)},u.kind=c.MmlMglyph.prototype.kind,u.styles={"mjx-mglyph > img":{display:"inline-block",border:0,padding:0}},u);function u(){return null!==o&&o.apply(this,arguments)||this}e.CHTMLmglyph=l},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),o=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var o,a=r(0),s=r(2),c=r(175),l=(o=c.CommonTextNodeMixin(s.CHTMLWrapper),i(u,o),u.prototype.toCHTML=function(t){var e,r;this.markUsed();var n=this.adaptor,i=this.parent.variant,o=this.node.getText();if("-explicitFont"===i){var a=this.jax.getFontData(this.parent.styles);n.append(t,this.jax.unknownText(o,i,a))}else{var s=this.parent.stretch.c,c=this.parent.remapChars(s?[s]:this.unicodeChars(o));try{for(var 
l=d(c),u=l.next();!u.done;u=l.next()){var h=u.value,f=this.getVariantChar(i,h)[3],p=f.unknown?this.jax.unknownText(String.fromCharCode(h),i):this.html("mjx-c",{class:this.char(h)});n.append(t,p),f.used=!0}}catch(t){e={error:t}}finally{try{u&&!u.done&&(r=l.return)&&r.call(l)}finally{if(e)throw e.error}}}},u.kind=a.TextNode.prototype.kind,u.autoStyle=!1,u.styles={"mjx-c":{display:"inline-block"},"mjx-utext":{display:"inline-block",padding:".75em 0 .25em 0"},"mjx-measure-text":{position:"absolute","font-family":"MJXZERO","white-space":"nowrap",height:"1px",width:"1px",overflow:"hidden"}},u);function u(){return null!==o&&o.apply(this,arguments)||this}e.CHTMLTextNode=l},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),g=this&&this.__values||function(t){var e="function"==typeof Symbol&&Symbol.iterator,r=e&&t[e],n=0;if(r)return r.call(t);if(t&&"number"==typeof t.length)return{next:function(){return t&&n>=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")},M=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0t.h&&(t.h=l),u>t.d&&(t.d=u),t.ic=v.ic||0,t.sk=v.sk||0}}catch(t){r={error:t}}finally{try{d&&!d.done&&(n=p.return)&&n.call(p)}finally{if(r)throw r.error}}1"},63:{c:"?"},64:{c:"@"},65:{c:"A"},66:{c:"B"},67:{c:"C"},68:{c:"D"},69:{c:"E"},70:{c:"F"},71:{c:"G"},72:{c:"H"},73:{c:"I"},74:{c:"J"},75:{c:"K"},76:{c:"L"},77:{c:"M"},78:{c:"N"},79:{c:"O"},80:{c:"P"},81:{c:"Q"},82:{c:"R"},83:{c:"S"},84:{c:"T"},85:{c:"U"},86:{c:"V"},87:{c:"W"},88:{c:"X"},89:{c:"Y"},90:{c:"Z"},91:{c:"["},93:{c:"]"},94:{c:"^"},95:{c:"_"},96:{c:"`"},97:{c:"a"},98:{c:"b"},99:{c:"c"},100:{c:"d"},101:{c:"e"},102:{c:"f"},103:{c:"g"},104:{c:"h"},105:{c:"i"},106:{c:"j"},107:{c:"k"},108:{c:"l"},109:{c:"m"},110:{c:"n"},111:{c:"o"},112:{c:"p"},113:{c:"q"},114:{c:"r"},115:{c:"s"},116:{c:"t"},117:{c:"u"},118:{c:"v"},119:{c:"w"},120:{c:"x"},121:{c:"y"},122:{c:"z"},123:{c:"{"},124:{c:"|"},125:{c:"}"},126:{c:"~"},183:{c:"\\22C5"},697:{c:"\\2032"},913:{c:"A"},914:{c:"B"},917:{c:"E"},918:{c:"Z"},919:{c:"H"},921:{c:"I"},922:{c:"K"},924:{c:"M"},925:{c:"N"},927:{c:"O"},929:{c:"P"},930:{c:"\\398"},932:{c:"T"},935:{c:"X"},978:{c:"\\3A5"},988:{c:"F"},8194:{c:""},8195:{c:""},8196:{c:""},8197:{c:""},8198:{c:""},8201:{c:""},8202:{c:""},8213:{c:"\\2014"},8214:{c:"\\2225"},8215:{c:"_"},8226:{c:"\\2219"},8243:{c:"\\2032\\2032"},8244:{c:"\\2032\\2032\\2032"},8254:{c:"\\2C9"},8260:{c:"/"},8279:{c:"\\2032\\2032\\2032\\2032"},8407:{c:"\\2192",f:"VB"},8465:{c:"I",f:"FR"},8476:{c:"R",f:"FR"},8602:{c:"\\2190\\338"},8603:{c:"\\2192\\338"},8622:{c:"\\2194\\338"},8653:{c:"\\21D0\\338"},8654:{c:"\\21D4\\338"},8655:{c:"\\21D2\\338"},8708:{c:"\\2203\\338"},8710:{c:"\\394"},8716:{c:"\\220B\\338"},8740:{c:"\\2223\\338"},8742:{c:"\\2225\\338"},8769:{c:"\\223C\\338"},8772:{c:"\\2243\\338"},8775:{c:"\\2245\\338"},8777:{c:"\\2248\\338"},8802:{c:"\\2261\\338"},8813:{c:"\\224D\\338"},8814:{c:"<\\338"},8815:{c:">\\338"},8816:{c:"\\2264\\338"},8817:{c:"\\2265\\338"},8832:{c:"\\227A\\338"},8833:{c:"\\227B\\338"},8836:{c:"\\2282\\338"},8837:{c:"\\2283\\338"},8840:{c:"\\2286\\338"},8841:{c:"\\2287\\
338"},8876:{c:"\\22A2\\338"},8877:{c:"\\22A8\\338"},8930:{c:"\\2291\\338"},8931:{c:"\\2292\\338"},9001:{c:"\\27E8"},9002:{c:"\\27E9"},9653:{c:"\\25B3"},9663:{c:"\\25BD"},10072:{c:"\\2223"},10744:{c:"/",f:"BI"},10799:{c:"\\D7"},12296:{c:"\\27E8"},12297:{c:"\\27E9"}})},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.bold={32:[0,0,.25],33:[.705,0,.35],34:[.694,-.329,.603],35:[.694,.193,.958],36:[.75,.056,.575],37:[.75,.056,.958],38:[.705,.011,.894],39:[.694,-.329,.319],40:[.75,.249,.447],41:[.75,.249,.447],42:[.75,-.306,.575],43:[.633,.131,.894],44:[.171,.194,.319],45:[.278,-.166,.383],46:[.171,0,.319],47:[.75,.25,.575],48:[.654,.01,.575],49:[.655,0,.575],50:[.654,0,.575],51:[.655,.011,.575],52:[.656,0,.575],53:[.655,.011,.575],54:[.655,.011,.575],55:[.676,.011,.575],56:[.654,.011,.575],57:[.654,.011,.575],58:[.444,0,.319],59:[.444,.194,.319],60:[.587,.085,.894],61:[.393,-.109,.894],62:[.587,.085,.894],63:[.7,0,.543],64:[.699,.006,.894],65:[.698,0,.869],66:[.686,0,.818],67:[.697,.011,.831],68:[.686,0,.882],69:[.68,0,.756],70:[.68,0,.724],71:[.697,.01,.904],72:[.686,0,.9],73:[.686,0,.436],74:[.686,.011,.594],75:[.686,0,.901],76:[.686,0,.692],77:[.686,0,1.092],78:[.686,0,.9],79:[.696,.01,.864],80:[.686,0,.786],81:[.696,.193,.864],82:[.686,.011,.862],83:[.697,.011,.639],84:[.675,0,.8],85:[.686,.011,.885],86:[.686,.007,.869],87:[.686,.007,1.189],88:[.686,0,.869],89:[.686,0,.869],90:[.686,0,.703],91:[.75,.25,.319],92:[.75,.25,.575],93:[.75,.25,.319],94:[.694,-.52,.575],95:[-.01,.061,.575],96:[.706,-.503,.575],97:[.453,.006,.559],98:[.694,.006,.639],99:[.453,.006,.511],100:[.694,.006,.639],101:[.452,.006,.527],102:[.7,0,.351,{ic:.101}],103:[.455,.201,.575],104:[.694,0,.639],105:[.695,0,.319],106:[.695,.2,.351],107:[.694,0,.607],108:[.694,0,.319],109:[.45,0,.958],110:[.45,0,.639],111:[.452,.005,.575],112:[.45,.194,.639],113:[.45,.194,.607],114:[.45,0,.474],115:[.453,.006,.454],116:[.635,.005,.447],117:[.45,.006,.639],118:[.444,0,.607],119:[.444,0,.831],120:[.444,0,.607],121:[.444,.2,.607],122:[.444,0,.511],123:[.75,.25,.575],124:[.75,.249,.319],125:[.75,.25,.575],126:[.344,-.202,.575],160:[0,0,.25],168:[.695,-.535,.575],172:[.371,-.061,.767],175:[.607,-.54,.575],176:[.702,-.536,.575],177:[.728,.035,.894],180:[.706,-.503,.575],183:[.336,-.166,.319],215:[.53,.028,.894],247:[.597,.096,.894],305:[.452,.008,.394,{sk:.0319}],567:[.451,.201,.439,{sk:.0958}],697:[.563,-.033,.344],710:[.694,-.52,.575],711:[.66,-.515,.575],713:[.607,-.54,.575],714:[.706,-.503,.575],715:[.706,-.503,.575],728:[.694,-.5,.575],729:[.695,-.525,.575],730:[.702,-.536,.575],732:[.694,-.552,.575],768:[.706,-.503,0],769:[.706,-.503,0],770:[.694,-.52,0],771:[.694,-.552,0],772:[.607,-.54,0],774:[.694,-.5,0],775:[.695,-.525,0],776:[.695,-.535,0],778:[.702,-.536,0],779:[.714,-.511,0],780:[.66,-.515,0],824:[.711,.21,0],913:[.698,0,.869],914:[.686,0,.818],915:[.68,0,.692],916:[.698,0,.958],917:[.68,0,.756],918:[.686,0,.703],919:[.686,0,.9],920:[.696,.01,.894],921:[.686,0,.436],922:[.686,0,.901],923:[.698,0,.806],924:[.686,0,1.092],925:[.686,0,.9],926:[.675,0,.767],927:[.696,.01,.864],928:[.68,0,.9],929:[.686,0,.786],930:[.696,.01,.894],931:[.686,0,.831],932:[.675,0,.8],933:[.697,0,.894],934:[.686,0,.831],935:[.686,0,.869],936:[.686,0,.894],937:[.696,0,.831],945:[.452,.008,.761,{sk:.0319}],946:[.701,.194,.66,{sk:.0958}],947:[.451,.211,.59],948:[.725,.008,.522,{sk:.0639}],949:[.461,.017,.529,{sk:.0958}],950:[.711,.202,.508,{sk:.0958}],951:[.452,.211,.6,{sk:.0639}],952:[.702,.008,.562,{sk:.0958}],953:[.452,.008,.4
12,{sk:.0639}],954:[.452,.008,.668],955:[.694,.013,.671],956:[.452,.211,.708,{sk:.0319}],957:[.452,0,.577,{sk:.0319}],958:[.711,.201,.508,{sk:.128}],959:[.452,.008,.585,{sk:.0639}],960:[.444,.008,.682],961:[.451,.211,.612,{sk:.0958}],962:[.451,.105,.424,{sk:.0958}],963:[.444,.008,.686],964:[.444,.013,.521,{ic:.089,sk:.0319}],965:[.453,.008,.631,{sk:.0319}],966:[.452,.216,.747,{sk:.0958}],967:[.452,.201,.718,{sk:.0639}],968:[.694,.202,.758,{sk:.128}],969:[.453,.008,.718],977:[.701,.008,.692,{sk:.0958}],978:[.697,0,.894],981:[.694,.202,.712,{sk:.0958}],982:[.444,.008,.975],988:[.68,0,.724],1009:[.451,.194,.612,{sk:.0958}],1013:[.444,.007,.483,{sk:.0639}],8194:[0,0,.5],8195:[0,0,.999],8196:[0,0,.333],8197:[0,0,.25],8198:[0,0,.167],8201:[0,0,.167],8202:[0,0,.083],8211:[.3,-.249,.575],8212:[.3,-.249,1.15],8213:[.3,-.249,1.15],8214:[.75,.248,.575],8215:[-.01,.061,.575],8216:[.694,-.329,.319],8217:[.694,-.329,.319],8220:[.694,-.329,.603],8221:[.694,-.329,.603],8224:[.702,.211,.511],8225:[.702,.202,.511],8226:[.474,-.028,.575],8230:[.171,0,1.295],8242:[.563,-.033,.344],8243:[.563,0,.688],8244:[.563,0,1.032],8254:[.607,-.54,.575],8260:[.75,.25,.575],8279:[.563,0,1.376],8407:[.723,-.513,.575],8463:[.694,.008,.668,{sk:-.0319}],8465:[.686,.026,.554],8467:[.702,.019,.474,{sk:.128}],8472:[.461,.21,.74],8476:[.686,.026,.828],8501:[.694,0,.703],8592:[.518,.017,1.15],8593:[.694,.193,.575],8594:[.518,.017,1.15],8595:[.694,.194,.575],8596:[.518,.017,1.15],8597:[.767,.267,.575],8598:[.724,.194,1.15],8599:[.724,.193,1.15],8600:[.694,.224,1.15],8601:[.694,.224,1.15],8602:[.711,.21,1.15],8603:[.711,.21,1.15],8614:[.518,.017,1.15],8617:[.518,.017,1.282],8618:[.518,.017,1.282],8622:[.711,.21,1.15],8636:[.518,-.22,1.15],8637:[.281,.017,1.15],8640:[.518,-.22,1.15],8641:[.281,.017,1.15],8652:[.718,.017,1.15],8653:[.711,.21,1.15],8654:[.711,.21,1.15],8655:[.711,.21,1.15],8656:[.547,.046,1.15],8657:[.694,.193,.703],8658:[.547,.046,1.15],8659:[.694,.194,.703],8660:[.547,.046,1.15],8661:[.767,.267,.703],8704:[.694,.016,.639],8706:[.71,.017,.628,{sk:.0958}],8707:[.694,0,.639],8708:[.711,.21,.639],8709:[.767,.073,.575],8710:[.698,0,.958],8711:[.686,.024,.958],8712:[.587,.086,.767],8713:[.711,.21,.767],8715:[.587,.086,.767],8716:[.711,.21,.767],8722:[.281,-.221,.894],8723:[.537,.227,.894],8725:[.75,.25,.575],8726:[.75,.25,.575],8727:[.472,-.028,.575],8728:[.474,-.028,.575],8729:[.474,-.028,.575],8730:[.82,.18,.958],8733:[.451,.008,.894],8734:[.452,.008,1.15],8736:[.714,0,.722],8739:[.75,.249,.319],8740:[.75,.249,.319],8741:[.75,.248,.575],8742:[.75,.248,.575],8743:[.604,.017,.767],8744:[.604,.016,.767],8745:[.603,.016,.767],8746:[.604,.016,.767],8747:[.711,.211,.569,{ic:.063}],8764:[.391,-.109,.894],8768:[.583,.082,.319],8769:[.711,.21,.894],8771:[.502,0,.894],8772:[.711,.21,.894],8773:[.638,.027,.894],8775:[.711,.21,.894],8776:[.524,-.032,.894],8777:[.711,.21,.894],8781:[.533,.032,.894],8784:[.721,-.109,.894],8800:[.711,.21,.894],8801:[.505,0,.894],8802:[.711,.21,.894],8804:[.697,.199,.894],8805:[.697,.199,.894],8810:[.617,.116,1.15],8811:[.618,.116,1.15],8813:[.711,.21,.894],8814:[.711,.21,.894],8815:[.711,.21,.894],8816:[.711,.21,.894],8817:[.711,.21,.894],8826:[.585,.086,.894],8827:[.586,.086,.894],8832:[.711,.21,.894],8833:[.711,.21,.894],8834:[.587,.085,.894],8835:[.587,.086,.894],8836:[.711,.21,.894],8837:[.711,.21,.894],8838:[.697,.199,.894],8839:[.697,.199,.894],8840:[.711,.21,.894],8841:[.711,.21,.894],8846:[.604,.016,.767],8849:[.697,.199,.894],8850:[.697,.199,.894],8851:[.604,0,.767],8852:[.604,0,.767],8853:[.632
,.132,.894],8854:[.632,.132,.894],8855:[.632,.132,.894],8856:[.632,.132,.894],8857:[.632,.132,.894],8866:[.693,0,.703],8867:[.693,0,.703],8868:[.694,0,.894],8869:[.693,0,.894],8872:[.75,.249,.974],8876:[.711,.21,.703],8877:[.75,.249,.974],8900:[.523,.021,.575],8901:[.336,-.166,.319],8902:[.502,0,.575],8904:[.54,.039,1],8930:[.711,.21,.894],8931:[.711,.21,.894],8942:[.951,.029,.319],8943:[.336,-.166,1.295],8945:[.871,-.101,1.323],8968:[.75,.248,.511],8969:[.75,.248,.511],8970:[.749,.248,.511],8971:[.749,.248,.511],8994:[.405,-.108,1.15],8995:[.392,-.126,1.15],9001:[.75,.249,.447],9002:[.75,.249,.447],9651:[.711,0,1.022],9653:[.711,0,1.022],9657:[.54,.039,.575],9661:[.5,.21,1.022],9663:[.5,.21,1.022],9667:[.539,.038,.575],9711:[.711,.211,1.15],9824:[.719,.129,.894],9825:[.711,.024,.894],9826:[.719,.154,.894],9827:[.719,.129,.894],9837:[.75,.017,.447],9838:[.741,.223,.447],9839:[.724,.224,.447],10072:[.75,.249,.319],10216:[.75,.249,.447],10217:[.75,.249,.447],10229:[.518,.017,1.805],10230:[.518,.017,1.833],10231:[.518,.017,2.126],10232:[.547,.046,1.868],10233:[.547,.046,1.87],10234:[.547,.046,2.126],10236:[.518,.017,1.833],10744:[.711,.21,.894],10799:[.53,.028,.894],10815:[.686,0,.9],10927:[.696,.199,.894],10928:[.697,.199,.894],12296:[.75,.249,.447],12297:[.75,.249,.447]}},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(1),i=r(181);e.doubleStruck=n.AddCSS(i.doubleStruck,{32:{c:" "},65:{c:"A"},66:{c:"B"},67:{c:"C"},68:{c:"D"},69:{c:"E"},70:{c:"F"},71:{c:"G"},72:{c:"H"},73:{c:"I"},74:{c:"J"},75:{c:"K"},76:{c:"L"},77:{c:"M"},78:{c:"N"},79:{c:"O"},80:{c:"P"},81:{c:"Q"},82:{c:"R"},83:{c:"S"},84:{c:"T"},85:{c:"U"},86:{c:"V"},87:{c:"W"},88:{c:"X"},89:{c:"Y"},90:{c:"Z"},107:{c:"k"},913:{c:"A",f:"B"},914:{c:"B",f:"B"},917:{c:"E",f:"B"},918:{c:"Z",f:"B"},919:{c:"H",f:"B"},921:{c:"I",f:"B"},922:{c:"K",f:"B"},924:{c:"M",f:"B"},925:{c:"N",f:"B"},927:{c:"O",f:"B"},929:{c:"P",f:"B"},930:{c:"\\398",f:"B"},932:{c:"T",f:"B"},935:{c:"X",f:"B"},978:{c:"\\3A5",f:"B"},988:{c:"F",f:"B"},8450:{c:"C",f:"A"},8461:{c:"H",f:"A"},8469:{c:"N",f:"A"},8473:{c:"P",f:"A"},8474:{c:"Q",f:"A"},8477:{c:"R",f:"A"},8484:{c:"Z",f:"A"}})},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.doubleStruck={32:[0,0,.25],65:[.701,0,.722],66:[.683,0,.667],67:[.702,.019,.722],68:[.683,0,.722],69:[.683,0,.667],70:[.683,0,.611],71:[.702,.019,.778],72:[.683,0,.778],73:[.683,0,.389],74:[.683,.077,.5],75:[.683,0,.778],76:[.683,0,.667],77:[.683,0,.944],78:[.683,.02,.722],79:[.701,.019,.778],80:[.683,0,.611],81:[.701,.181,.778],82:[.683,0,.722],83:[.702,.012,.556],84:[.683,0,.667],85:[.683,.019,.722],86:[.683,.02,.722],87:[.683,.019,1],88:[.683,0,.722],89:[.683,0,.722],90:[.683,0,.667],107:[.683,0,.556],160:[0,0,.25],913:[.698,0,.869],914:[.686,0,.818],917:[.68,0,.756],918:[.686,0,.703],919:[.686,0,.9],921:[.686,0,.436],922:[.686,0,.901],924:[.686,0,1.092],925:[.686,0,.9],927:[.696,.01,.864],929:[.686,0,.786],930:[.696,.01,.894],932:[.675,0,.8],935:[.686,0,.869],978:[.697,0,.894],988:[.68,0,.724],8450:[.702,.019,.722],8461:[.683,0,.778],8469:[.683,.02,.722],8473:[.683,0,.611],8474:[.701,.181,.778],8477:[.683,0,.722],8484:[.683,0,.667]}},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(1),i=r(183);e.frakturBold=n.AddCSS(i.frakturBold,{32:{c:" 
"},33:{c:"!"},38:{c:"&"},40:{c:"("},41:{c:")"},42:{c:"*"},43:{c:"+"},44:{c:","},45:{c:"-"},46:{c:"."},47:{c:"/"},48:{c:"0"},49:{c:"1"},50:{c:"2"},51:{c:"3"},52:{c:"4"},53:{c:"5"},54:{c:"6"},55:{c:"7"},56:{c:"8"},57:{c:"9"},58:{c:":"},59:{c:";"},61:{c:"="},63:{c:"?"},65:{c:"A"},66:{c:"B"},67:{c:"C"},68:{c:"D"},69:{c:"E"},70:{c:"F"},71:{c:"G"},72:{c:"H"},73:{c:"I"},74:{c:"J"},75:{c:"K"},76:{c:"L"},77:{c:"M"},78:{c:"N"},79:{c:"O"},80:{c:"P"},81:{c:"Q"},82:{c:"R"},83:{c:"S"},84:{c:"T"},85:{c:"U"},86:{c:"V"},87:{c:"W"},88:{c:"X"},89:{c:"Y"},90:{c:"Z"},91:{c:"["},93:{c:"]"},94:{c:"^"},97:{c:"a"},98:{c:"b"},99:{c:"c"},100:{c:"d"},101:{c:"e"},102:{c:"f"},103:{c:"g"},104:{c:"h"},105:{c:"i"},106:{c:"j"},107:{c:"k"},108:{c:"l"},109:{c:"m"},110:{c:"n"},111:{c:"o"},112:{c:"p"},113:{c:"q"},114:{c:"r"},115:{c:"s"},116:{c:"t"},117:{c:"u"},118:{c:"v"},119:{c:"w"},120:{c:"x"},121:{c:"y"},122:{c:"z"},913:{c:"A",f:"B"},914:{c:"B",f:"B"},917:{c:"E",f:"B"},918:{c:"Z",f:"B"},919:{c:"H",f:"B"},921:{c:"I",f:"B"},922:{c:"K",f:"B"},924:{c:"M",f:"B"},925:{c:"N",f:"B"},927:{c:"O",f:"B"},929:{c:"P",f:"B"},930:{c:"\\398",f:"B"},932:{c:"T",f:"B"},935:{c:"X",f:"B"},978:{c:"\\3A5",f:"B"},988:{c:"F",f:"B"},8260:{c:"/"}})},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.frakturBold={32:[0,0,.25],33:[.689,.012,.349],34:[.695,-.432,.254],38:[.696,.016,.871],39:[.695,-.436,.25],40:[.737,.186,.459],41:[.735,.187,.459],42:[.692,-.449,.328],43:[.598,.082,.893],44:[.107,.191,.328],45:[.275,-.236,.893],46:[.102,.015,.328],47:[.721,.182,.593],48:[.501,.012,.593],49:[.489,0,.593],50:[.491,0,.593],51:[.487,.193,.593],52:[.495,.196,.593],53:[.481,.19,.593],54:[.704,.012,.593],55:[.479,.197,.593],56:[.714,.005,.593],57:[.487,.195,.593],58:[.457,.012,.255],59:[.458,.19,.255],61:[.343,-.168,.582],63:[.697,.014,.428],65:[.686,.031,.847],66:[.684,.031,1.044],67:[.676,.032,.723],68:[.683,.029,.982],69:[.686,.029,.783],70:[.684,.146,.722],71:[.687,.029,.927],72:[.683,.126,.851],73:[.681,.025,.655],74:[.68,.141,.652],75:[.681,.026,.789],76:[.683,.028,.786],77:[.683,.032,1.239],78:[.679,.03,.983],79:[.726,.03,.976],80:[.688,.223,.977],81:[.726,.083,.976],82:[.688,.028,.978],83:[.685,.031,.978],84:[.686,.03,.79],85:[.688,.039,.851],86:[.685,.029,.982],87:[.683,.03,1.235],88:[.681,.035,.849],89:[.688,.214,.984],90:[.677,.148,.711],91:[.74,.13,.257],93:[.738,.132,.257],94:[.734,-.452,.59],97:[.472,.032,.603],98:[.69,.032,.59],99:[.473,.026,.464],100:[.632,.028,.589],101:[.471,.027,.472],102:[.687,.222,.388],103:[.472,.208,.595],104:[.687,.207,.615],105:[.686,.025,.331],106:[.682,.203,.332],107:[.682,.025,.464],108:[.681,.024,.337],109:[.476,.031,.921],110:[.473,.028,.654],111:[.482,.034,.609],112:[.557,.207,.604],113:[.485,.211,.596],114:[.472,.026,.46],115:[.479,.034,.523],116:[.648,.027,.393],117:[.472,.032,.589],118:[.546,.027,.604],119:[.549,.032,.918],120:[.471,.188,.459],121:[.557,.221,.589],122:[.471,.214,.461],160:[0,0,.25],913:[.698,0,.869],914:[.686,0,.818],917:[.68,0,.756],918:[.686,0,.703],919:[.686,0,.9],921:[.686,0,.436],922:[.686,0,.901],924:[.686,0,1.092],925:[.686,0,.9],927:[.696,.01,.864],929:[.686,0,.786],930:[.696,.01,.894],932:[.675,0,.8],935:[.686,0,.869],978:[.697,0,.894],988:[.68,0,.724],8216:[.708,-.411,.254],8217:[.692,-.394,.254],8260:[.721,.182,.593],58113:[.63,.027,.587],58114:[.693,.212,.394],58115:[.681,.219,.387],58116:[.473,.212,.593],58117:[.684,.027,.393],58120:[.679,.22,.981],58121:[.717,.137,.727]}},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var 
n=r(1),i=r(185);e.fraktur=n.AddCSS(i.fraktur,{32:{c:" "},33:{c:"!"},38:{c:"&"},40:{c:"("},41:{c:")"},42:{c:"*"},43:{c:"+"},44:{c:","},45:{c:"-"},46:{c:"."},47:{c:"/"},48:{c:"0"},49:{c:"1"},50:{c:"2"},51:{c:"3"},52:{c:"4"},53:{c:"5"},54:{c:"6"},55:{c:"7"},56:{c:"8"},57:{c:"9"},58:{c:":"},59:{c:";"},61:{c:"="},63:{c:"?"},65:{c:"A"},66:{c:"B"},67:{c:"C"},68:{c:"D"},69:{c:"E"},70:{c:"F"},71:{c:"G"},72:{c:"H"},73:{c:"I"},74:{c:"J"},75:{c:"K"},76:{c:"L"},77:{c:"M"},78:{c:"N"},79:{c:"O"},80:{c:"P"},81:{c:"Q"},82:{c:"R"},83:{c:"S"},84:{c:"T"},85:{c:"U"},86:{c:"V"},87:{c:"W"},88:{c:"X"},89:{c:"Y"},90:{c:"Z"},91:{c:"["},93:{c:"]"},94:{c:"^"},97:{c:"a"},98:{c:"b"},99:{c:"c"},100:{c:"d"},101:{c:"e"},102:{c:"f"},103:{c:"g"},104:{c:"h"},105:{c:"i"},106:{c:"j"},107:{c:"k"},108:{c:"l"},109:{c:"m"},110:{c:"n"},111:{c:"o"},112:{c:"p"},113:{c:"q"},114:{c:"r"},115:{c:"s"},116:{c:"t"},117:{c:"u"},118:{c:"v"},119:{c:"w"},120:{c:"x"},121:{c:"y"},122:{c:"z"},913:{c:"A",f:""},914:{c:"B",f:""},917:{c:"E",f:""},918:{c:"Z",f:""},919:{c:"H",f:""},921:{c:"I",f:""},922:{c:"K",f:""},924:{c:"M",f:""},925:{c:"N",f:""},927:{c:"O",f:""},929:{c:"P",f:""},930:{c:"\\398",f:""},932:{c:"T",f:""},935:{c:"X",f:""},978:{c:"\\3A5",f:""},988:{c:"F",f:""},8260:{c:"/"},8460:{c:"H",f:"FR"},8465:{c:"I",f:"FR"},8476:{c:"R",f:"FR"},8488:{c:"Z",f:"FR"},8493:{c:"C",f:"FR"}})},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.fraktur={32:[0,0,.25],33:[.689,.012,.296],34:[.695,-.432,.215],38:[.698,.011,.738],39:[.695,-.436,.212],40:[.737,.186,.389],41:[.735,.187,.389],42:[.692,-.449,.278],43:[.598,.082,.756],44:[.107,.191,.278],45:[.275,-.236,.756],46:[.102,.015,.278],47:[.721,.182,.502],48:[.492,.013,.502],49:[.468,0,.502],50:[.474,0,.502],51:[.473,.182,.502],52:[.476,.191,.502],53:[.458,.184,.502],54:[.7,.013,.502],55:[.468,.181,.502],56:[.705,.01,.502],57:[.469,.182,.502],58:[.457,.012,.216],59:[.458,.189,.216],61:[.368,-.132,.756],63:[.693,.011,.362],65:[.696,.026,.718],66:[.691,.027,.884],67:[.685,.024,.613],68:[.685,.027,.832],69:[.685,.024,.663],70:[.686,.153,.611],71:[.69,.026,.785],72:[.666,.133,.72],73:[.686,.026,.554],74:[.686,.139,.552],75:[.68,.027,.668],76:[.686,.026,.666],77:[.692,.027,1.05],78:[.686,.025,.832],79:[.729,.027,.827],80:[.692,.218,.828],81:[.729,.069,.827],82:[.686,.026,.828],83:[.692,.027,.829],84:[.701,.027,.669],85:[.697,.027,.646],86:[.686,.026,.831],87:[.686,.027,1.046],88:[.688,.027,.719],89:[.686,.218,.833],90:[.729,.139,.602],91:[.74,.13,.278],93:[.738,.131,.278],94:[.734,-.452,.5],97:[.47,.035,.5],98:[.685,.031,.513],99:[.466,.029,.389],100:[.609,.033,.499],101:[.467,.03,.401],102:[.681,.221,.326],103:[.47,.209,.504],104:[.688,.205,.521],105:[.673,.02,.279],106:[.672,.208,.281],107:[.689,.025,.389],108:[.685,.02,.28],109:[.475,.026,.767],110:[.475,.022,.527],111:[.48,.028,.489],112:[.541,.212,.5],113:[.479,.219,.489],114:[.474,.021,.389],115:[.478,.029,.443],116:[.64,.02,.333],117:[.474,.023,.517],118:[.53,.028,.512],119:[.532,.028,.774],120:[.472,.188,.389],121:[.528,.218,.499],122:[.471,.214,.391],160:[0,0,.25],913:[.716,0,.75],914:[.683,0,.708],917:[.68,0,.681],918:[.683,0,.611],919:[.683,0,.75],921:[.683,0,.361],922:[.683,0,.778],924:[.683,0,.917],925:[.683,0,.75],927:[.705,.022,.778],929:[.683,0,.681],930:[.705,.022,.778],932:[.677,0,.722],935:[.683,0,.75],978:[.705,0,.778],988:[.68,0,.653],8216:[.708,-.41,.215],8217:[.692,-.395,.215],8260:[.721,.182,.502],8460:[.666,.133,.72],8465:[.686,.026,.554],8476:[.686,.026,.828],8488:[.729,.139,.602],8493:[.685,.024,.613],58112:[.68
3,.032,.497],58113:[.616,.03,.498],58114:[.68,.215,.333],58115:[.679,.224,.329],58116:[.471,.214,.503],58117:[.686,.02,.333],58118:[.577,.021,.334],58119:[.475,.022,.501]}},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(1),i=r(187);e.italic=n.AddCSS(i.italic,{32:{c:" "},33:{c:"!"},35:{c:"#"},37:{c:"%"},38:{c:"&"},40:{c:"("},41:{c:")"},42:{c:"*"},43:{c:"+"},44:{c:","},45:{c:"-"},46:{c:"."},47:{c:"/"},48:{c:"0"},49:{c:"1"},50:{c:"2"},51:{c:"3"},52:{c:"4"},53:{c:"5"},54:{c:"6"},55:{c:"7"},56:{c:"8"},57:{c:"9"},58:{c:":"},59:{c:";"},61:{c:"="},63:{c:"?"},64:{c:"@"},65:{c:"A"},66:{c:"B"},67:{c:"C"},68:{c:"D"},69:{c:"E"},70:{c:"F"},71:{c:"G"},72:{c:"H"},73:{c:"I"},74:{c:"J"},75:{c:"K"},76:{c:"L"},77:{c:"M"},78:{c:"N"},79:{c:"O"},80:{c:"P"},81:{c:"Q"},82:{c:"R"},83:{c:"S"},84:{c:"T"},85:{c:"U"},86:{c:"V"},87:{c:"W"},88:{c:"X"},89:{c:"Y"},90:{c:"Z"},91:{c:"["},93:{c:"]"},94:{c:"^"},95:{c:"_"},97:{c:"a"},98:{c:"b"},99:{c:"c"},100:{c:"d"},101:{c:"e"},102:{c:"f"},103:{c:"g"},104:{c:"h"},105:{c:"i"},106:{c:"j"},107:{c:"k"},108:{c:"l"},109:{c:"m"},110:{c:"n"},111:{c:"o"},112:{c:"p"},113:{c:"q"},114:{c:"r"},115:{c:"s"},116:{c:"t"},117:{c:"u"},118:{c:"v"},119:{c:"w"},120:{c:"x"},121:{c:"y"},122:{c:"z"},126:{c:"~"},913:{c:"A"},914:{c:"B"},917:{c:"E"},918:{c:"Z"},919:{c:"H"},921:{c:"I"},922:{c:"K"},924:{c:"M"},925:{c:"N"},927:{c:"O"},929:{c:"P"},930:{c:"\\398"},932:{c:"T"},935:{c:"X"},978:{c:"\\3A5"},988:{c:"F"},8213:{c:"\\2014"},8215:{c:"_"},8260:{c:"/"},8462:{c:"h",f:"I"},8710:{c:"\\394"},10744:{c:"/",f:"I"}})},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.italic={32:[0,0,.25],33:[.716,0,.307,{ic:.073}],34:[.694,-.379,.514],35:[.694,.194,.818],37:[.75,.056,.818],38:[.716,.022,.767],39:[.694,-.379,.307,{ic:.07}],40:[.75,.25,.409,{ic:.108}],41:[.75,.25,.409],42:[.75,-.32,.511,{ic:.073}],43:[.557,.057,.767],44:[.121,.194,.307],45:[.251,-.18,.358],46:[.121,0,.307],47:[.716,.215,.778],48:[.665,.021,.511,{ic:.051}],49:[.666,0,.511],50:[.666,.022,.511],51:[.666,.022,.511,{ic:.051}],52:[.666,.194,.511],53:[.666,.022,.511,{ic:.056}],54:[.665,.022,.511,{ic:.054}],55:[.666,.022,.511,{ic:.123}],56:[.666,.021,.511],57:[.666,.022,.511],58:[.431,0,.307],59:[.431,.194,.307],61:[.367,-.133,.767],63:[.716,0,.511],64:[.705,.011,.767],65:[.716,0,.75,{sk:.139}],66:[.683,0,.759,{sk:.0833}],67:[.705,.022,.715,{sk:.0833}],68:[.683,0,.828,{sk:.0556}],69:[.68,0,.738,{sk:.0833}],70:[.68,0,.643,{ic:.106,sk:.0833}],71:[.705,.022,.786,{sk:.0833}],72:[.683,0,.831,{ic:.057,sk:.0556}],73:[.683,0,.44,{ic:.064,sk:.111}],74:[.683,.022,.555,{ic:.078,sk:.167}],75:[.683,0,.849,{sk:.0556}],76:[.683,0,.681,{sk:.0278}],77:[.683,0,.97,{ic:.081,sk:.0833}],78:[.683,0,.803,{ic:.085,sk:.0833}],79:[.704,.022,.763,{sk:.0833}],80:[.683,0,.642,{ic:.109,sk:.0833}],81:[.704,.194,.791,{sk:.0833}],82:[.683,.021,.759,{sk:.0833}],83:[.705,.022,.613,{sk:.0833}],84:[.677,0,.584,{ic:.12,sk:.0833}],85:[.683,.022,.683,{ic:.084,sk:.0278}],86:[.683,.022,.583,{ic:.186}],87:[.683,.022,.944,{ic:.104}],88:[.683,0,.828,{sk:.0833}],89:[.683,0,.581,{ic:.182}],90:[.683,0,.683,{sk:.0833}],91:[.75,.25,.307,{ic:.139}],93:[.75,.25,.307,{ic:.052}],94:[.694,-.527,.511],95:[-.025,.062,.511],97:[.441,.01,.529],98:[.694,.011,.429],99:[.442,.011,.433,{sk:.0556}],100:[.694,.01,.52,{sk:.167}],101:[.442,.011,.466,{sk:.0556}],102:[.705,.205,.49,{ic:.06,sk:.167}],103:[.442,.205,.477,{sk:.0278}],104:[.694,.011,.576,{sk:-.0278}],105:[.661,.011,.345],106:[.661,.204,.412],107:[.694,.011,.521],108:[.694,.011,.298,{sk:.0833}
],109:[.442,.011,.878],110:[.442,.011,.6],111:[.441,.011,.485,{sk:.0556}],112:[.442,.194,.503,{sk:.0833}],113:[.442,.194,.446,{sk:.0833}],114:[.442,.011,.451,{sk:.0556}],115:[.442,.01,.469,{sk:.0556}],116:[.626,.011,.361,{sk:.0833}],117:[.442,.011,.572,{sk:.0278}],118:[.443,.011,.485,{sk:.0278}],119:[.443,.011,.716,{sk:.0833}],120:[.442,.011,.572,{sk:.0278}],121:[.442,.205,.49,{sk:.0556}],122:[.442,.011,.465,{sk:.0556}],126:[.318,-.208,.511,{ic:.06}],160:[0,0,.25],163:[.714,.011,.769],305:[.441,.01,.307],567:[.442,.204,.332],768:[.697,-.5,0],769:[.697,-.5,0],770:[.694,-.527,0],771:[.668,-.558,0,{ic:.06}],772:[.589,-.544,0,{ic:.054}],774:[.694,-.515,0,{ic:.062}],775:[.669,-.548,0],776:[.669,-.554,0],778:[.716,-.542,0],779:[.697,-.503,0,{ic:.065}],780:[.638,-.502,0],913:[.716,0,.75,{sk:.139}],914:[.683,0,.759,{sk:.0833}],915:[.68,0,.615,{ic:.106,sk:.0833}],916:[.716,0,.833,{sk:.167}],917:[.68,0,.738,{sk:.0833}],918:[.683,0,.683,{sk:.0833}],919:[.683,0,.831,{ic:.057,sk:.0556}],920:[.704,.022,.763,{sk:.0833}],921:[.683,0,.44,{ic:.064,sk:.111}],922:[.683,0,.849,{sk:.0556}],923:[.716,0,.694,{sk:.167}],924:[.683,0,.97,{ic:.081,sk:.0833}],925:[.683,0,.803,{ic:.085,sk:.0833}],926:[.677,0,.742,{sk:.0833}],927:[.704,.022,.763,{sk:.0833}],928:[.68,0,.831,{ic:.056,sk:.0556}],929:[.683,0,.642,{ic:.109,sk:.0833}],930:[.704,.022,.763,{sk:.0833}],931:[.683,0,.78,{sk:.0833}],932:[.677,0,.584,{ic:.12,sk:.0833}],933:[.705,0,.583,{ic:.117,sk:.0556}],934:[.683,0,.667,{sk:.0833}],935:[.683,0,.828,{sk:.0833}],936:[.683,0,.612,{ic:.08,sk:.0556}],937:[.704,0,.772,{sk:.0833}],945:[.442,.011,.64,{sk:.0278}],946:[.705,.194,.566,{sk:.0833}],947:[.441,.216,.518],948:[.717,.01,.444,{sk:.0556}],949:[.452,.022,.466,{sk:.0833}],950:[.704,.204,.438,{sk:.0833}],951:[.442,.216,.497,{sk:.0556}],952:[.705,.01,.469,{sk:.0833}],953:[.442,.01,.354,{sk:.0556}],954:[.442,.011,.576],955:[.694,.012,.583],956:[.442,.216,.603,{sk:.0278}],957:[.442,0,.494,{sk:.0278}],958:[.704,.205,.438,{sk:.111}],959:[.441,.011,.485,{sk:.0556}],960:[.431,.011,.57],961:[.442,.216,.517,{sk:.0833}],962:[.442,.107,.363,{sk:.0833}],963:[.431,.011,.571],964:[.431,.013,.437,{ic:.08,sk:.0278}],965:[.443,.01,.54,{sk:.0278}],966:[.442,.218,.654,{sk:.0833}],967:[.442,.204,.626,{sk:.0556}],968:[.694,.205,.651,{sk:.111}],969:[.443,.011,.622],977:[.705,.011,.591,{sk:.0833}],978:[.705,0,.583,{ic:.117,sk:.0556}],981:[.694,.205,.596,{sk:.0833}],982:[.431,.01,.828],988:[.68,0,.643,{ic:.106,sk:.0833}],1009:[.442,.194,.517,{sk:.0833}],1013:[.431,.011,.406,{sk:.0556}],8211:[.285,-.248,.511],8212:[.285,-.248,1.022],8213:[.285,-.248,1.022],8215:[-.025,.062,.511],8216:[.694,-.379,.307,{ic:.055}],8217:[.694,-.379,.307,{ic:.07}],8220:[.694,-.379,.514,{ic:.092}],8221:[.694,-.379,.514],8260:[.716,.215,.778],8462:[.694,.011,.576,{sk:-.0278}],8463:[.695,.013,.54],8710:[.716,0,.833,{sk:.167}],10744:[.716,.215,.778]}},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(1),i=r(189);e.largeop=n.AddCSS(i.largeop,{32:{c:" "},40:{c:"("},41:{c:")"},47:{c:"/"},91:{c:"["},93:{c:"]"},123:{c:"{"},125:{c:"}"},8260:{c:"/"},9001:{c:"\\27E8"},9002:{c:"\\27E9"},10072:{c:"\\2223"},10764:{c:"\\222C\\222C"},12296:{c:"\\27E8"},12297:{c:"\\27E9"}})},function(t,e,r){"use 
strict";Object.defineProperty(e,"__esModule",{value:!0}),e.largeop={32:[0,0,.25],40:[1.15,.649,.597],41:[1.15,.649,.597],47:[1.15,.649,.811],91:[1.15,.649,.472],92:[1.15,.649,.811],93:[1.15,.649,.472],123:[1.15,.649,.667],125:[1.15,.649,.667],160:[0,0,.25],710:[.772,-.565,1],732:[.75,-.611,1],770:[.772,-.565,0],771:[.75,-.611,0],8214:[.602,0,.778],8260:[1.15,.649,.811],8593:[.6,0,.667],8595:[.6,0,.667],8657:[.599,0,.778],8659:[.6,0,.778],8719:[.95,.45,1.278],8720:[.95,.45,1.278],8721:[.95,.45,1.444],8730:[1.15,.65,1],8739:[.627,.015,.333],8741:[.627,.015,.556],8747:[1.36,.862,.556,{ic:.388}],8748:[1.36,.862,1.084,{ic:.388}],8749:[1.36,.862,1.592,{ic:.388}],8750:[1.36,.862,.556,{ic:.388}],8896:[.95,.45,1.111],8897:[.95,.45,1.111],8898:[.949,.45,1.111],8899:[.95,.449,1.111],8968:[1.15,.649,.528],8969:[1.15,.649,.528],8970:[1.15,.649,.528],8971:[1.15,.649,.528],9001:[1.15,.649,.611],9002:[1.15,.649,.611],9168:[.602,0,.667],10072:[.627,.015,.333],10216:[1.15,.649,.611],10217:[1.15,.649,.611],10752:[.949,.449,1.511],10753:[.949,.449,1.511],10754:[.949,.449,1.511],10756:[.95,.449,1.111],10758:[.95,.45,1.111],10764:[1.36,.862,2.168,{ic:.388}],12296:[1.15,.649,.611],12297:[1.15,.649,.611]}},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(1),i=r(191);e.monospace=n.AddCSS(i.monospace,{32:{c:" "},33:{c:"!"},35:{c:"#"},36:{c:"$"},37:{c:"%"},38:{c:"&"},40:{c:"("},41:{c:")"},42:{c:"*"},43:{c:"+"},44:{c:","},45:{c:"-"},46:{c:"."},47:{c:"/"},48:{c:"0"},49:{c:"1"},50:{c:"2"},51:{c:"3"},52:{c:"4"},53:{c:"5"},54:{c:"6"},55:{c:"7"},56:{c:"8"},57:{c:"9"},58:{c:":"},59:{c:";"},60:{c:"<"},61:{c:"="},62:{c:">"},63:{c:"?"},64:{c:"@"},65:{c:"A"},66:{c:"B"},67:{c:"C"},68:{c:"D"},69:{c:"E"},70:{c:"F"},71:{c:"G"},72:{c:"H"},73:{c:"I"},74:{c:"J"},75:{c:"K"},76:{c:"L"},77:{c:"M"},78:{c:"N"},79:{c:"O"},80:{c:"P"},81:{c:"Q"},82:{c:"R"},83:{c:"S"},84:{c:"T"},85:{c:"U"},86:{c:"V"},87:{c:"W"},88:{c:"X"},89:{c:"Y"},90:{c:"Z"},91:{c:"["},93:{c:"]"},94:{c:"^"},95:{c:"_"},96:{c:"`"},97:{c:"a"},98:{c:"b"},99:{c:"c"},100:{c:"d"},101:{c:"e"},102:{c:"f"},103:{c:"g"},104:{c:"h"},105:{c:"i"},106:{c:"j"},107:{c:"k"},108:{c:"l"},109:{c:"m"},110:{c:"n"},111:{c:"o"},112:{c:"p"},113:{c:"q"},114:{c:"r"},115:{c:"s"},116:{c:"t"},117:{c:"u"},118:{c:"v"},119:{c:"w"},120:{c:"x"},121:{c:"y"},122:{c:"z"},123:{c:"{"},124:{c:"|"},125:{c:"}"},126:{c:"~"},697:{c:"\\2032"},913:{c:"A"},914:{c:"B"},917:{c:"E"},918:{c:"Z"},919:{c:"H"},921:{c:"I"},922:{c:"K"},924:{c:"M"},925:{c:"N"},927:{c:"O"},929:{c:"P"},930:{c:"\\398"},932:{c:"T"},935:{c:"X"},978:{c:"\\3A5"},988:{c:"F"},8215:{c:"_"},8243:{c:"\\2032\\2032"},8244:{c:"\\2032\\2032\\2032"},8260:{c:"/"},8279:{c:"\\2032\\2032\\2032\\2032"},8710:{c:"\\394"}})},function(t,e,r){"use 
strict";Object.defineProperty(e,"__esModule",{value:!0}),e.monospace={32:[0,0,.525],33:[.622,0,.525],34:[.623,-.333,.525],35:[.611,0,.525],36:[.694,.082,.525],37:[.694,.083,.525],38:[.622,.011,.525],39:[.611,-.287,.525],40:[.694,.082,.525],41:[.694,.082,.525],42:[.52,-.09,.525],43:[.531,-.081,.525],44:[.14,.139,.525],45:[.341,-.271,.525],46:[.14,0,.525],47:[.694,.083,.525],48:[.621,.01,.525],49:[.622,0,.525],50:[.622,0,.525],51:[.622,.011,.525],52:[.624,0,.525],53:[.611,.01,.525],54:[.622,.011,.525],55:[.627,.01,.525],56:[.621,.01,.525],57:[.622,.011,.525],58:[.431,0,.525],59:[.431,.139,.525],60:[.557,-.055,.525],61:[.417,-.195,.525],62:[.557,-.055,.525],63:[.617,0,.525],64:[.617,.006,.525],65:[.623,0,.525],66:[.611,0,.525],67:[.622,.011,.525],68:[.611,0,.525],69:[.611,0,.525],70:[.611,0,.525],71:[.622,.011,.525],72:[.611,0,.525],73:[.611,0,.525],74:[.611,.011,.525],75:[.611,0,.525],76:[.611,0,.525],77:[.611,0,.525],78:[.611,0,.525],79:[.621,.01,.525],80:[.611,0,.525],81:[.621,.138,.525],82:[.611,.011,.525],83:[.622,.011,.525],84:[.611,0,.525],85:[.611,.011,.525],86:[.611,.007,.525],87:[.611,.007,.525],88:[.611,0,.525],89:[.611,0,.525],90:[.611,0,.525],91:[.694,.082,.525],92:[.694,.083,.525],93:[.694,.082,.525],94:[.611,-.46,.525],95:[-.025,.095,.525],96:[.681,-.357,.525],97:[.439,.006,.525],98:[.611,.006,.525],99:[.44,.006,.525],100:[.611,.006,.525],101:[.44,.006,.525],102:[.617,0,.525],103:[.442,.229,.525],104:[.611,0,.525],105:[.612,0,.525],106:[.612,.228,.525],107:[.611,0,.525],108:[.611,0,.525],109:[.436,0,.525],110:[.436,0,.525],111:[.44,.006,.525],112:[.437,.221,.525],113:[.437,.221,.525],114:[.437,0,.525],115:[.44,.006,.525],116:[.554,.006,.525],117:[.431,.005,.525],118:[.431,0,.525],119:[.431,0,.525],120:[.431,0,.525],121:[.431,.228,.525],122:[.431,0,.525],123:[.694,.083,.525],124:[.694,.082,.525],125:[.694,.083,.525],126:[.611,-.466,.525],127:[.612,-.519,.525],160:[0,0,.525],305:[.431,0,.525],567:[.431,.228,.525],697:[.623,-.334,.525],768:[.611,-.485,0],769:[.611,-.485,0],770:[.611,-.46,0],771:[.611,-.466,0],772:[.577,-.5,0],774:[.611,-.504,0],776:[.612,-.519,0],778:[.619,-.499,0],780:[.577,-.449,0],913:[.623,0,.525],914:[.611,0,.525],915:[.611,0,.525],916:[.623,0,.525],917:[.611,0,.525],918:[.611,0,.525],919:[.611,0,.525],920:[.621,.01,.525],921:[.611,0,.525],922:[.611,0,.525],923:[.623,0,.525],924:[.611,0,.525],925:[.611,0,.525],926:[.611,0,.525],927:[.621,.01,.525],928:[.611,0,.525],929:[.611,0,.525],930:[.621,.01,.525],931:[.611,0,.525],932:[.611,0,.525],933:[.622,0,.525],934:[.611,0,.525],935:[.611,0,.525],936:[.611,0,.525],937:[.622,0,.525],978:[.622,0,.525],988:[.611,0,.525],8215:[-.025,.095,.525],8242:[.623,-.334,.525],8243:[.623,0,1.05],8244:[.623,0,1.575],8260:[.694,.083,.525],8279:[.623,0,2.1],8710:[.623,0,.525]}},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(1),i=r(193);e.normal=n.AddCSS(i.normal,{32:{c:" 
"},33:{c:"!"},35:{c:"#"},36:{c:"$"},37:{c:"%"},38:{c:"&"},40:{c:"("},41:{c:")"},42:{c:"*"},43:{c:"+"},44:{c:","},45:{c:"-"},46:{c:"."},47:{c:"/"},48:{c:"0"},49:{c:"1"},50:{c:"2"},51:{c:"3"},52:{c:"4"},53:{c:"5"},54:{c:"6"},55:{c:"7"},56:{c:"8"},57:{c:"9"},58:{c:":"},59:{c:";"},60:{c:"<"},61:{c:"="},62:{c:">"},63:{c:"?"},64:{c:"@"},65:{c:"A"},66:{c:"B"},67:{c:"C"},68:{c:"D"},69:{c:"E"},70:{c:"F"},71:{c:"G"},72:{c:"H"},73:{c:"I"},74:{c:"J"},75:{c:"K"},76:{c:"L"},77:{c:"M"},78:{c:"N"},79:{c:"O"},80:{c:"P"},81:{c:"Q"},82:{c:"R"},83:{c:"S"},84:{c:"T"},85:{c:"U"},86:{c:"V"},87:{c:"W"},88:{c:"X"},89:{c:"Y"},90:{c:"Z"},91:{c:"["},93:{c:"]"},94:{c:"^"},95:{c:"_"},96:{c:"`"},97:{c:"a"},98:{c:"b"},99:{c:"c"},100:{c:"d"},101:{c:"e"},102:{c:"f"},103:{c:"g"},104:{c:"h"},105:{c:"i"},106:{c:"j"},107:{c:"k"},108:{c:"l"},109:{c:"m"},110:{c:"n"},111:{c:"o"},112:{c:"p"},113:{c:"q"},114:{c:"r"},115:{c:"s"},116:{c:"t"},117:{c:"u"},118:{c:"v"},119:{c:"w"},120:{c:"x"},121:{c:"y"},122:{c:"z"},123:{c:"{"},124:{c:"|"},125:{c:"}"},126:{c:"~"},163:{f:"MI"},183:{c:"\\22C5"},697:{c:"\\2032"},913:{c:"A"},914:{c:"B"},917:{c:"E"},918:{c:"Z"},919:{c:"H"},921:{c:"I"},922:{c:"K"},924:{c:"M"},925:{c:"N"},927:{c:"O"},929:{c:"P"},930:{c:"\\398"},932:{c:"T"},935:{c:"X"},978:{c:"\\3A5"},988:{c:"F"},1014:{c:"\\220D"},8192:{c:""},8193:{c:""},8194:{c:""},8195:{c:""},8196:{c:""},8197:{c:""},8198:{c:""},8201:{c:""},8202:{c:""},8203:{c:""},8204:{c:""},8213:{c:"\\2014"},8214:{c:"\\2225"},8215:{c:"_"},8226:{c:"\\2219"},8243:{c:"\\2032\\2032"},8244:{c:"\\2032\\2032\\2032"},8246:{c:"\\2035\\2035"},8247:{c:"\\2035\\2035\\2035"},8254:{c:"\\2C9"},8260:{c:"/"},8279:{c:"\\2032\\2032\\2032\\2032"},8289:{c:""},8290:{c:""},8291:{c:""},8292:{c:""},8407:{c:"\\2192",f:"V"},8450:{c:"C",f:"A"},8459:{c:"H",f:"SC"},8460:{c:"H",f:"FR"},8461:{c:"H",f:"A"},8462:{c:"h",f:"I"},8463:{f:""},8464:{c:"J",f:"SC"},8465:{c:"I",f:"FR"},8466:{c:"L",f:"SC"},8469:{c:"N",f:"A"},8473:{c:"P",f:"A"},8474:{c:"Q",f:"A"},8475:{c:"R",f:"SC"},8476:{c:"R",f:"FR"},8477:{c:"R",f:"A"},8484:{c:"Z",f:"A"},8486:{c:"\\3A9",f:""},8488:{c:"Z",f:"FR"},8492:{c:"B",f:"SC"},8493:{c:"C",f:"FR"},8496:{c:"E",f:"SC"},8497:{c:"F",f:"SC"},8499:{c:"M",f:"SC"},8708:{c:"\\2203\\338"},8710:{c:"\\394"},8716:{c:"\\220B\\338"},8726:{f:""},8772:{c:"\\2243\\338"},8775:{c:"\\2246"},8777:{c:"\\2248\\338"},8802:{c:"\\2261\\338"},8813:{c:"\\224D\\338"},8820:{c:"\\2272\\338"},8821:{c:"\\2273\\338"},8824:{c:"\\2276\\338"},8825:{c:"\\2277\\338"},8836:{c:"\\2282\\338"},8837:{c:"\\2283\\338"},8930:{c:"\\2291\\338"},8931:{c:"\\2292\\338"},8965:{c:"\\22BC"},8966:{c:"\\2A5E"},8988:{c:"\\250C"},8989:{c:"\\2510"},8990:{c:"\\2514"},8991:{c:"\\2518"},9001:{c:"\\27E8"},9002:{c:"\\27E9"},9642:{c:"\\25A0"},9652:{c:"\\25B2"},9653:{c:"\\25B3"},9656:{c:"\\25B6"},9662:{c:"\\25BC"},9663:{c:"\\25BD"},9666:{c:"\\25C0"},9723:{c:"\\25A1"},9724:{c:"\\25A0"},10072:{c:"\\2223"},10744:{c:"/",f:"I"},10764:{c:"\\222C\\222C"},10799:{c:"\\D7"},12296:{c:"\\27E8"},12297:{c:"\\27E9"},119808:{c:"A",f:"B"},119809:{c:"B",f:"B"},119810:{c:"C",f:"B"},119811:{c:"D",f:"B"},119812:{c:"E",f:"B"},119813:{c:"F",f:"B"},119814:{c:"G",f:"B"},119815:{c:"H",f:"B"},119816:{c:"I",f:"B"},119817:{c:"J",f:"B"},119818:{c:"K",f:"B"},119819:{c:"L",f:"B"},119820:{c:"M",f:"B"},119821:{c:"N",f:"B"},119822:{c:"O",f:"B"},119823:{c:"P",f:"B"},119824:{c:"Q",f:"B"},119825:{c:"R",f:"B"},119826:{c:"S",f:"B"},119827:{c:"T",f:"B"},119828:{c:"U",f:"B"},119829:{c:"V",f:"B"},119830:{c:"W",f:"B"},119831:{c:"X",f:"B"},119832:{c:"Y",f:"B"},119833:{c:"Z",f:"B"},119834:{c:"a",f:"B"},119835:
{c:"b",f:"B"},119836:{c:"c",f:"B"},119837:{c:"d",f:"B"},119838:{c:"e",f:"B"},119839:{c:"f",f:"B"},119840:{c:"g",f:"B"},119841:{c:"h",f:"B"},119842:{c:"i",f:"B"},119843:{c:"j",f:"B"},119844:{c:"k",f:"B"},119845:{c:"l",f:"B"},119846:{c:"m",f:"B"},119847:{c:"n",f:"B"},119848:{c:"o",f:"B"},119849:{c:"p",f:"B"},119850:{c:"q",f:"B"},119851:{c:"r",f:"B"},119852:{c:"s",f:"B"},119853:{c:"t",f:"B"},119854:{c:"u",f:"B"},119855:{c:"v",f:"B"},119856:{c:"w",f:"B"},119857:{c:"x",f:"B"},119858:{c:"y",f:"B"},119859:{c:"z",f:"B"},119860:{c:"A",f:"I"},119861:{c:"B",f:"I"},119862:{c:"C",f:"I"},119863:{c:"D",f:"I"},119864:{c:"E",f:"I"},119865:{c:"F",f:"I"},119866:{c:"G",f:"I"},119867:{c:"H",f:"I"},119868:{c:"I",f:"I"},119869:{c:"J",f:"I"},119870:{c:"K",f:"I"},119871:{c:"L",f:"I"},119872:{c:"M",f:"I"},119873:{c:"N",f:"I"},119874:{c:"O",f:"I"},119875:{c:"P",f:"I"},119876:{c:"Q",f:"I"},119877:{c:"R",f:"I"},119878:{c:"S",f:"I"},119879:{c:"T",f:"I"},119880:{c:"U",f:"I"},119881:{c:"V",f:"I"},119882:{c:"W",f:"I"},119883:{c:"X",f:"I"},119884:{c:"Y",f:"I"},119885:{c:"Z",f:"I"},119886:{c:"a",f:"I"},119887:{c:"b",f:"I"},119888:{c:"c",f:"I"},119889:{c:"d",f:"I"},119890:{c:"e",f:"I"},119891:{c:"f",f:"I"},119892:{c:"g",f:"I"},119893:{c:"h",f:"I"},119894:{c:"i",f:"I"},119895:{c:"j",f:"I"},119896:{c:"k",f:"I"},119897:{c:"l",f:"I"},119898:{c:"m",f:"I"},119899:{c:"n",f:"I"},119900:{c:"o",f:"I"},119901:{c:"p",f:"I"},119902:{c:"q",f:"I"},119903:{c:"r",f:"I"},119904:{c:"s",f:"I"},119905:{c:"t",f:"I"},119906:{c:"u",f:"I"},119907:{c:"v",f:"I"},119908:{c:"w",f:"I"},119909:{c:"x",f:"I"},119910:{c:"y",f:"I"},119911:{c:"z",f:"I"},119912:{c:"A",f:"BI"},119913:{c:"B",f:"BI"},119914:{c:"C",f:"BI"},119915:{c:"D",f:"BI"},119916:{c:"E",f:"BI"},119917:{c:"F",f:"BI"},119918:{c:"G",f:"BI"},119919:{c:"H",f:"BI"},119920:{c:"I",f:"BI"},119921:{c:"J",f:"BI"},119922:{c:"K",f:"BI"},119923:{c:"L",f:"BI"},119924:{c:"M",f:"BI"},119925:{c:"N",f:"BI"},119926:{c:"O",f:"BI"},119927:{c:"P",f:"BI"},119928:{c:"Q",f:"BI"},119929:{c:"R",f:"BI"},119930:{c:"S",f:"BI"},119931:{c:"T",f:"BI"},119932:{c:"U",f:"BI"},119933:{c:"V",f:"BI"},119934:{c:"W",f:"BI"},119935:{c:"X",f:"BI"},119936:{c:"Y",f:"BI"},119937:{c:"Z",f:"BI"},119938:{c:"a",f:"BI"},119939:{c:"b",f:"BI"},119940:{c:"c",f:"BI"},119941:{c:"d",f:"BI"},119942:{c:"e",f:"BI"},119943:{c:"f",f:"BI"},119944:{c:"g",f:"BI"},119945:{c:"h",f:"BI"},119946:{c:"i",f:"BI"},119947:{c:"j",f:"BI"},119948:{c:"k",f:"BI"},119949:{c:"l",f:"BI"},119950:{c:"m",f:"BI"},119951:{c:"n",f:"BI"},119952:{c:"o",f:"BI"},119953:{c:"p",f:"BI"},119954:{c:"q",f:"BI"},119955:{c:"r",f:"BI"},119956:{c:"s",f:"BI"},119957:{c:"t",f:"BI"},119958:{c:"u",f:"BI"},119959:{c:"v",f:"BI"},119960:{c:"w",f:"BI"},119961:{c:"x",f:"BI"},119962:{c:"y",f:"BI"},119963:{c:"z",f:"BI"},119964:{c:"A",f:"SC"},119965:{c:"B",f:"SC"},119966:{c:"C",f:"SC"},119967:{c:"D",f:"SC"},119968:{c:"E",f:"SC"},119969:{c:"F",f:"SC"},119970:{c:"G",f:"SC"},119971:{c:"H",f:"SC"},119972:{c:"I",f:"SC"},119973:{c:"J",f:"SC"},119974:{c:"K",f:"SC"},119975:{c:"L",f:"SC"},119976:{c:"M",f:"SC"},119977:{c:"N",f:"SC"},119978:{c:"O",f:"SC"},119979:{c:"P",f:"SC"},119980:{c:"Q",f:"SC"},119981:{c:"R",f:"SC"},119982:{c:"S",f:"SC"},119983:{c:"T",f:"SC"},119984:{c:"U",f:"SC"},119985:{c:"V",f:"SC"},119986:{c:"W",f:"SC"},119987:{c:"X",f:"SC"},119988:{c:"Y",f:"SC"},119989:{c:"Z",f:"SC"},119990:{c:"a",f:"I"},119991:{c:"b",f:"I"},119992:{c:"c",f:"I"},119993:{c:"d",f:"I"},119994:{c:"e",f:"I"},119995:{c:"f",f:"I"},119996:{c:"g",f:"I"},119997:{c:"h",f:"I"},119998:{c:"i",f:"I"},119999:{c:"j",f:"I"},12e4:{c:"k",f:"I"},
120001:{c:"l",f:"I"},120002:{c:"m",f:"I"},120003:{c:"n",f:"I"},120004:{c:"o",f:"I"},120005:{c:"p",f:"I"},120006:{c:"q",f:"I"},120007:{c:"r",f:"I"},120008:{c:"s",f:"I"},120009:{c:"t",f:"I"},120010:{c:"u",f:"I"},120011:{c:"v",f:"I"},120012:{c:"w",f:"I"},120013:{c:"x",f:"I"},120014:{c:"y",f:"I"},120015:{c:"z",f:"I"},120016:{c:"A",f:"SC"},120017:{c:"B",f:"SC"},120018:{c:"C",f:"SC"},120019:{c:"D",f:"SC"},120020:{c:"E",f:"SC"},120021:{c:"F",f:"SC"},120022:{c:"G",f:"SC"},120023:{c:"H",f:"SC"},120024:{c:"I",f:"SC"},120025:{c:"J",f:"SC"},120026:{c:"K",f:"SC"},120027:{c:"L",f:"SC"},120028:{c:"M",f:"SC"},120029:{c:"N",f:"SC"},120030:{c:"O",f:"SC"},120031:{c:"P",f:"SC"},120032:{c:"Q",f:"SC"},120033:{c:"R",f:"SC"},120034:{c:"S",f:"SC"},120035:{c:"T",f:"SC"},120036:{c:"U",f:"SC"},120037:{c:"V",f:"SC"},120038:{c:"W",f:"SC"},120039:{c:"X",f:"SC"},120040:{c:"Y",f:"SC"},120041:{c:"Z",f:"SC"},120042:{c:"a",f:"BI"},120043:{c:"b",f:"BI"},120044:{c:"c",f:"BI"},120045:{c:"d",f:"BI"},120046:{c:"e",f:"BI"},120047:{c:"f",f:"BI"},120048:{c:"g",f:"BI"},120049:{c:"h",f:"BI"},120050:{c:"i",f:"BI"},120051:{c:"j",f:"BI"},120052:{c:"k",f:"BI"},120053:{c:"l",f:"BI"},120054:{c:"m",f:"BI"},120055:{c:"n",f:"BI"},120056:{c:"o",f:"BI"},120057:{c:"p",f:"BI"},120058:{c:"q",f:"BI"},120059:{c:"r",f:"BI"},120060:{c:"s",f:"BI"},120061:{c:"t",f:"BI"},120062:{c:"u",f:"BI"},120063:{c:"v",f:"BI"},120064:{c:"w",f:"BI"},120065:{c:"x",f:"BI"},120066:{c:"y",f:"BI"},120067:{c:"z",f:"BI"},120068:{c:"A",f:"FR"},120069:{c:"B",f:"FR"},120070:{c:"C",f:"FR"},120071:{c:"D",f:"FR"},120072:{c:"E",f:"FR"},120073:{c:"F",f:"FR"},120074:{c:"G",f:"FR"},120075:{c:"H",f:"FR"},120076:{c:"I",f:"FR"},120077:{c:"J",f:"FR"},120078:{c:"K",f:"FR"},120079:{c:"L",f:"FR"},120080:{c:"M",f:"FR"},120081:{c:"N",f:"FR"},120082:{c:"O",f:"FR"},120083:{c:"P",f:"FR"},120084:{c:"Q",f:"FR"},120085:{c:"R",f:"FR"},120086:{c:"S",f:"FR"},120087:{c:"T",f:"FR"},120088:{c:"U",f:"FR"},120089:{c:"V",f:"FR"},120090:{c:"W",f:"FR"},120091:{c:"X",f:"FR"},120092:{c:"Y",f:"FR"},120093:{c:"Z",f:"FR"},120094:{c:"a",f:"FR"},120095:{c:"b",f:"FR"},120096:{c:"c",f:"FR"},120097:{c:"d",f:"FR"},120098:{c:"e",f:"FR"},120099:{c:"f",f:"FR"},120100:{c:"g",f:"FR"},120101:{c:"h",f:"FR"},120102:{c:"i",f:"FR"},120103:{c:"j",f:"FR"},120104:{c:"k",f:"FR"},120105:{c:"l",f:"FR"},120106:{c:"m",f:"FR"},120107:{c:"n",f:"FR"},120108:{c:"o",f:"FR"},120109:{c:"p",f:"FR"},120110:{c:"q",f:"FR"},120111:{c:"r",f:"FR"},120112:{c:"s",f:"FR"},120113:{c:"t",f:"FR"},120114:{c:"u",f:"FR"},120115:{c:"v",f:"FR"},120116:{c:"w",f:"FR"},120117:{c:"x",f:"FR"},120118:{c:"y",f:"FR"},120119:{c:"z",f:"FR"},120120:{c:"A",f:"A"},120121:{c:"B",f:"A"},120122:{c:"C",f:"A"},120123:{c:"D",f:"A"},120124:{c:"E",f:"A"},120125:{c:"F",f:"A"},120126:{c:"G",f:"A"},120127:{c:"H",f:"A"},120128:{c:"I",f:"A"},120129:{c:"J",f:"A"},120130:{c:"K",f:"A"},120131:{c:"L",f:"A"},120132:{c:"M",f:"A"},120133:{c:"N",f:"A"},120134:{c:"O",f:"A"},120135:{c:"P",f:"A"},120136:{c:"Q",f:"A"},120137:{c:"R",f:"A"},120138:{c:"S",f:"A"},120139:{c:"T",f:"A"},120140:{c:"U",f:"A"},120141:{c:"V",f:"A"},120142:{c:"W",f:"A"},120143:{c:"X",f:"A"},120144:{c:"Y",f:"A"},120145:{c:"Z",f:"A"},120146:{c:"a",f:"B"},120147:{c:"b",f:"B"},120148:{c:"c",f:"B"},120149:{c:"d",f:"B"},120150:{c:"e",f:"B"},120151:{c:"f",f:"B"},120152:{c:"g",f:"B"},120153:{c:"h",f:"B"},120154:{c:"i",f:"B"},120155:{c:"j",f:"B"},120156:{c:"k",f:"A"},120157:{c:"l",f:"B"},120158:{c:"m",f:"B"},120159:{c:"n",f:"B"},120160:{c:"o",f:"B"},120161:{c:"p",f:"B"},120162:{c:"q",f:"B"},120163:{c:"r",f:"B"},120164:{c:"s",f:"B"},120165:
{c:"t",f:"B"},120166:{c:"u",f:"B"},120167:{c:"v",f:"B"},120168:{c:"w",f:"B"},120169:{c:"x",f:"B"},120170:{c:"y",f:"B"},120171:{c:"z",f:"B"},120172:{c:"A",f:"FR-B"},120173:{c:"B",f:"FR-B"},120174:{c:"C",f:"FR-B"},120175:{c:"D",f:"FR-B"},120176:{c:"E",f:"FR-B"},120177:{c:"F",f:"FR-B"},120178:{c:"G",f:"FR-B"},120179:{c:"H",f:"FR-B"},120180:{c:"I",f:"FR-B"},120181:{c:"J",f:"FR-B"},120182:{c:"K",f:"FR-B"},120183:{c:"L",f:"FR-B"},120184:{c:"M",f:"FR-B"},120185:{c:"N",f:"FR-B"},120186:{c:"O",f:"FR-B"},120187:{c:"P",f:"FR-B"},120188:{c:"Q",f:"FR-B"},120189:{c:"R",f:"FR-B"},120190:{c:"S",f:"FR-B"},120191:{c:"T",f:"FR-B"},120192:{c:"U",f:"FR-B"},120193:{c:"V",f:"FR-B"},120194:{c:"W",f:"FR-B"},120195:{c:"X",f:"FR-B"},120196:{c:"Y",f:"FR-B"},120197:{c:"Z",f:"FR-B"},120198:{c:"a",f:"FR-B"},120199:{c:"b",f:"FR-B"},120200:{c:"c",f:"FR-B"},120201:{c:"d",f:"FR-B"},120202:{c:"e",f:"FR-B"},120203:{c:"f",f:"FR-B"},120204:{c:"g",f:"FR-B"},120205:{c:"h",f:"FR-B"},120206:{c:"i",f:"FR-B"},120207:{c:"j",f:"FR-B"},120208:{c:"k",f:"FR-B"},120209:{c:"l",f:"FR-B"},120210:{c:"m",f:"FR-B"},120211:{c:"n",f:"FR-B"},120212:{c:"o",f:"FR-B"},120213:{c:"p",f:"FR-B"},120214:{c:"q",f:"FR-B"},120215:{c:"r",f:"FR-B"},120216:{c:"s",f:"FR-B"},120217:{c:"t",f:"FR-B"},120218:{c:"u",f:"FR-B"},120219:{c:"v",f:"FR-B"},120220:{c:"w",f:"FR-B"},120221:{c:"x",f:"FR-B"},120222:{c:"y",f:"FR-B"},120223:{c:"z",f:"FR-B"},120224:{c:"A",f:"SS"},120225:{c:"B",f:"SS"},120226:{c:"C",f:"SS"},120227:{c:"D",f:"SS"},120228:{c:"E",f:"SS"},120229:{c:"F",f:"SS"},120230:{c:"G",f:"SS"},120231:{c:"H",f:"SS"},120232:{c:"I",f:"SS"},120233:{c:"J",f:"SS"},120234:{c:"K",f:"SS"},120235:{c:"L",f:"SS"},120236:{c:"M",f:"SS"},120237:{c:"N",f:"SS"},120238:{c:"O",f:"SS"},120239:{c:"P",f:"SS"},120240:{c:"Q",f:"SS"},120241:{c:"R",f:"SS"},120242:{c:"S",f:"SS"},120243:{c:"T",f:"SS"},120244:{c:"U",f:"SS"},120245:{c:"V",f:"SS"},120246:{c:"W",f:"SS"},120247:{c:"X",f:"SS"},120248:{c:"Y",f:"SS"},120249:{c:"Z",f:"SS"},120250:{c:"a",f:"SS"},120251:{c:"b",f:"SS"},120252:{c:"c",f:"SS"},120253:{c:"d",f:"SS"},120254:{c:"e",f:"SS"},120255:{c:"f",f:"SS"},120256:{c:"g",f:"SS"},120257:{c:"h",f:"SS"},120258:{c:"i",f:"SS"},120259:{c:"j",f:"SS"},120260:{c:"k",f:"SS"},120261:{c:"l",f:"SS"},120262:{c:"m",f:"SS"},120263:{c:"n",f:"SS"},120264:{c:"o",f:"SS"},120265:{c:"p",f:"SS"},120266:{c:"q",f:"SS"},120267:{c:"r",f:"SS"},120268:{c:"s",f:"SS"},120269:{c:"t",f:"SS"},120270:{c:"u",f:"SS"},120271:{c:"v",f:"SS"},120272:{c:"w",f:"SS"},120273:{c:"x",f:"SS"},120274:{c:"y",f:"SS"},120275:{c:"z",f:"SS"},120276:{c:"A",f:"SS-B"},120277:{c:"B",f:"SS-B"},120278:{c:"C",f:"SS-B"},120279:{c:"D",f:"SS-B"},120280:{c:"E",f:"SS-B"},120281:{c:"F",f:"SS-B"},120282:{c:"G",f:"SS-B"},120283:{c:"H",f:"SS-B"},120284:{c:"I",f:"SS-B"},120285:{c:"J",f:"SS-B"},120286:{c:"K",f:"SS-B"},120287:{c:"L",f:"SS-B"},120288:{c:"M",f:"SS-B"},120289:{c:"N",f:"SS-B"},120290:{c:"O",f:"SS-B"},120291:{c:"P",f:"SS-B"},120292:{c:"Q",f:"SS-B"},120293:{c:"R",f:"SS-B"},120294:{c:"S",f:"SS-B"},120295:{c:"T",f:"SS-B"},120296:{c:"U",f:"SS-B"},120297:{c:"V",f:"SS-B"},120298:{c:"W",f:"SS-B"},120299:{c:"X",f:"SS-B"},120300:{c:"Y",f:"SS-B"},120301:{c:"Z",f:"SS-B"},120302:{c:"a",f:"SS-B"},120303:{c:"b",f:"SS-B"},120304:{c:"c",f:"SS-B"},120305:{c:"d",f:"SS-B"},120306:{c:"e",f:"SS-B"},120307:{c:"f",f:"SS-B"},120308:{c:"g",f:"SS-B"},120309:{c:"h",f:"SS-B"},120310:{c:"i",f:"SS-B"},120311:{c:"j",f:"SS-B"},120312:{c:"k",f:"SS-B"},120313:{c:"l",f:"SS-B"},120314:{c:"m",f:"SS-B"},120315:{c:"n",f:"SS-B"},120316:{c:"o",f:"SS-B"},120317:{c:"p",f:"SS-B"},120318:{c:"q",f
:"SS-B"},120319:{c:"r",f:"SS-B"},120320:{c:"s",f:"SS-B"},120321:{c:"t",f:"SS-B"},120322:{c:"u",f:"SS-B"},120323:{c:"v",f:"SS-B"},120324:{c:"w",f:"SS-B"},120325:{c:"x",f:"SS-B"},120326:{c:"y",f:"SS-B"},120327:{c:"z",f:"SS-B"},120328:{c:"A",f:"SS-I"},120329:{c:"B",f:"SS-I"},120330:{c:"C",f:"SS-I"},120331:{c:"D",f:"SS-I"},120332:{c:"E",f:"SS-I"},120333:{c:"F",f:"SS-I"},120334:{c:"G",f:"SS-I"},120335:{c:"H",f:"SS-I"},120336:{c:"I",f:"SS-I"},120337:{c:"J",f:"SS-I"},120338:{c:"K",f:"SS-I"},120339:{c:"L",f:"SS-I"},120340:{c:"M",f:"SS-I"},120341:{c:"N",f:"SS-I"},120342:{c:"O",f:"SS-I"},120343:{c:"P",f:"SS-I"},120344:{c:"Q",f:"SS-I"},120345:{c:"R",f:"SS-I"},120346:{c:"S",f:"SS-I"},120347:{c:"T",f:"SS-I"},120348:{c:"U",f:"SS-I"},120349:{c:"V",f:"SS-I"},120350:{c:"W",f:"SS-I"},120351:{c:"X",f:"SS-I"},120352:{c:"Y",f:"SS-I"},120353:{c:"Z",f:"SS-I"},120354:{c:"a",f:"SS-I"},120355:{c:"b",f:"SS-I"},120356:{c:"c",f:"SS-I"},120357:{c:"d",f:"SS-I"},120358:{c:"e",f:"SS-I"},120359:{c:"f",f:"SS-I"},120360:{c:"g",f:"SS-I"},120361:{c:"h",f:"SS-I"},120362:{c:"i",f:"SS-I"},120363:{c:"j",f:"SS-I"},120364:{c:"k",f:"SS-I"},120365:{c:"l",f:"SS-I"},120366:{c:"m",f:"SS-I"},120367:{c:"n",f:"SS-I"},120368:{c:"o",f:"SS-I"},120369:{c:"p",f:"SS-I"},120370:{c:"q",f:"SS-I"},120371:{c:"r",f:"SS-I"},120372:{c:"s",f:"SS-I"},120373:{c:"t",f:"SS-I"},120374:{c:"u",f:"SS-I"},120375:{c:"v",f:"SS-I"},120376:{c:"w",f:"SS-I"},120377:{c:"x",f:"SS-I"},120378:{c:"y",f:"SS-I"},120379:{c:"z",f:"SS-I"},120380:{c:"A",f:"SS-I"},120381:{c:"B",f:"SS-I"},120382:{c:"C",f:"SS-I"},120383:{c:"D",f:"SS-I"},120384:{c:"E",f:"SS-I"},120385:{c:"F",f:"SS-I"},120386:{c:"G",f:"SS-I"},120387:{c:"H",f:"SS-I"},120388:{c:"I",f:"SS-I"},120389:{c:"J",f:"SS-I"},120390:{c:"K",f:"SS-I"},120391:{c:"L",f:"SS-I"},120392:{c:"M",f:"SS-I"},120393:{c:"N",f:"SS-I"},120394:{c:"O",f:"SS-I"},120395:{c:"P",f:"SS-I"},120396:{c:"Q",f:"SS-I"},120397:{c:"R",f:"SS-I"},120398:{c:"S",f:"SS-I"},120399:{c:"T",f:"SS-I"},120400:{c:"U",f:"SS-I"},120401:{c:"V",f:"SS-I"},120402:{c:"W",f:"SS-I"},120403:{c:"X",f:"SS-I"},120404:{c:"Y",f:"SS-I"},120405:{c:"Z",f:"SS-I"},120406:{c:"a",f:"SS-I"},120407:{c:"b",f:"SS-I"},120408:{c:"c",f:"SS-I"},120409:{c:"d",f:"SS-I"},120410:{c:"e",f:"SS-I"},120411:{c:"f",f:"SS-I"},120412:{c:"g",f:"SS-I"},120413:{c:"h",f:"SS-I"},120414:{c:"i",f:"SS-I"},120415:{c:"j",f:"SS-I"},120416:{c:"k",f:"SS-I"},120417:{c:"l",f:"SS-I"},120418:{c:"m",f:"SS-I"},120419:{c:"n",f:"SS-I"},120420:{c:"o",f:"SS-I"},120421:{c:"p",f:"SS-I"},120422:{c:"q",f:"SS-I"},120423:{c:"r",f:"SS-I"},120424:{c:"s",f:"SS-I"},120425:{c:"t",f:"SS-I"},120426:{c:"u",f:"SS-I"},120427:{c:"v",f:"SS-I"},120428:{c:"w",f:"SS-I"},120429:{c:"x",f:"SS-I"},120430:{c:"y",f:"SS-I"},120431:{c:"z",f:"SS-I"},120432:{c:"A",f:"T"},120433:{c:"B",f:"T"},120434:{c:"C",f:"T"},120435:{c:"D",f:"T"},120436:{c:"E",f:"T"},120437:{c:"F",f:"T"},120438:{c:"G",f:"T"},120439:{c:"H",f:"T"},120440:{c:"I",f:"T"},120441:{c:"J",f:"T"},120442:{c:"K",f:"T"},120443:{c:"L",f:"T"},120444:{c:"M",f:"T"},120445:{c:"N",f:"T"},120446:{c:"O",f:"T"},120447:{c:"P",f:"T"},120448:{c:"Q",f:"T"},120449:{c:"R",f:"T"},120450:{c:"S",f:"T"},120451:{c:"T",f:"T"},120452:{c:"U",f:"T"},120453:{c:"V",f:"T"},120454:{c:"W",f:"T"},120455:{c:"X",f:"T"},120456:{c:"Y",f:"T"},120457:{c:"Z",f:"T"},120458:{c:"a",f:"T"},120459:{c:"b",f:"T"},120460:{c:"c",f:"T"},120461:{c:"d",f:"T"},120462:{c:"e",f:"T"},120463:{c:"f",f:"T"},120464:{c:"g",f:"T"},120465:{c:"h",f:"T"},120466:{c:"i",f:"T"},120467:{c:"j",f:"T"},120468:{c:"k",f:"T"},120469:{c:"l",f:"T"},120470:{c:"m",f:"T"},120471:{c:"n",f
:"T"},120472:{c:"o",f:"T"},120473:{c:"p",f:"T"},120474:{c:"q",f:"T"},120475:{c:"r",f:"T"},120476:{c:"s",f:"T"},120477:{c:"t",f:"T"},120478:{c:"u",f:"T"},120479:{c:"v",f:"T"},120480:{c:"w",f:"T"},120481:{c:"x",f:"T"},120482:{c:"y",f:"T"},120483:{c:"z",f:"T"},120484:{c:"\\131",f:"MI"},120485:{c:"\\237",f:"MI"},120488:{c:"A",f:"B"},120489:{c:"B",f:"B"},120490:{c:"\\393",f:"B"},120491:{c:"\\394",f:"B"},120492:{c:"E",f:"B"},120493:{c:"Z",f:"B"},120494:{c:"H",f:"B"},120495:{c:"\\398",f:"B"},120496:{c:"I",f:"B"},120497:{c:"K",f:"B"},120498:{c:"\\39B",f:"B"},120499:{c:"M",f:"B"},120500:{c:"N",f:"B"},120501:{c:"\\39E",f:"B"},120502:{c:"O",f:"B"},120503:{c:"\\3A0",f:"B"},120504:{c:"P",f:"B"},120505:{c:"\\398",f:"B"},120506:{c:"\\3A3",f:"B"},120507:{c:"T",f:"B"},120508:{c:"\\3A5",f:"B"},120509:{c:"\\3A6",f:"B"},120510:{c:"X",f:"B"},120511:{c:"\\3A8",f:"B"},120512:{c:"\\3A9",f:"B"},120513:{c:"\\2207",f:"B"},120514:{c:"\\3B1",f:"BI"},120515:{c:"\\3B2",f:"BI"},120516:{c:"\\3B3",f:"BI"},120517:{c:"\\3B4",f:"BI"},120518:{c:"\\3B5",f:"BI"},120519:{c:"\\3B6",f:"BI"},120520:{c:"\\3B7",f:"BI"},120521:{c:"\\3B8",f:"BI"},120522:{c:"\\3B9",f:"BI"},120523:{c:"\\3BA",f:"BI"},120524:{c:"\\3BB",f:"BI"},120525:{c:"\\3BC",f:"BI"},120526:{c:"\\3BD",f:"BI"},120527:{c:"\\3BE",f:"BI"},120528:{c:"\\3BF",f:"BI"},120529:{c:"\\3C0",f:"BI"},120530:{c:"\\3C1",f:"BI"},120531:{c:"\\3C2",f:"BI"},120532:{c:"\\3C3",f:"BI"},120533:{c:"\\3C4",f:"BI"},120534:{c:"\\3C5",f:"BI"},120535:{c:"\\3C6",f:"BI"},120536:{c:"\\3C7",f:"BI"},120537:{c:"\\3C8",f:"BI"},120538:{c:"\\3C9",f:"BI"},120539:{c:"\\2202",f:"BI"},120540:{c:"\\3F5",f:"BI"},120541:{c:"\\3D1",f:"BI"},120542:{c:"\\E009",f:"A"},120543:{c:"\\3D5",f:"BI"},120544:{c:"\\3F1",f:"BI"},120545:{c:"\\3D6",f:"BI"},120546:{c:"A",f:"I"},120547:{c:"B",f:"I"},120548:{c:"\\393",f:"I"},120549:{c:"\\394",f:"I"},120550:{c:"E",f:"I"},120551:{c:"Z",f:"I"},120552:{c:"H",f:"I"},120553:{c:"\\398",f:"I"},120554:{c:"I",f:"I"},120555:{c:"K",f:"I"},120556:{c:"\\39B",f:"I"},120557:{c:"M",f:"I"},120558:{c:"N",f:"I"},120559:{c:"\\39E",f:"I"},120560:{c:"O",f:"I"},120561:{c:"\\3A0",f:"I"},120562:{c:"P",f:"I"},120563:{c:"\\398",f:"I"},120564:{c:"\\3A3",f:"I"},120565:{c:"T",f:"I"},120566:{c:"\\3A5",f:"I"},120567:{c:"\\3A6",f:"I"},120568:{c:"X",f:"I"},120569:{c:"\\3A8",f:"I"},120570:{c:"\\3A9",f:"I"},120571:{c:"\\2207",f:""},120572:{c:"\\3B1",f:"I"},120573:{c:"\\3B2",f:"I"},120574:{c:"\\3B3",f:"I"},120575:{c:"\\3B4",f:"I"},120576:{c:"\\3B5",f:"I"},120577:{c:"\\3B6",f:"I"},120578:{c:"\\3B7",f:"I"},120579:{c:"\\3B8",f:"I"},120580:{c:"\\3B9",f:"I"},120581:{c:"\\3BA",f:"I"},120582:{c:"\\3BB",f:"I"},120583:{c:"\\3BC",f:"I"},120584:{c:"\\3BD",f:"I"},120585:{c:"\\3BE",f:"I"},120586:{c:"\\3BF",f:"I"},120587:{c:"\\3C0",f:"I"},120588:{c:"\\3C1",f:"I"},120589:{c:"\\3C2",f:"I"},120590:{c:"\\3C3",f:"I"},120591:{c:"\\3C4",f:"I"},120592:{c:"\\3C5",f:"I"},120593:{c:"\\3C6",f:"I"},120594:{c:"\\3C7",f:"I"},120595:{c:"\\3C8",f:"I"},120596:{c:"\\3C9",f:"I"},120597:{c:"\\2202",f:""},120598:{c:"\\3F5",f:"I"},120599:{c:"\\3D1",f:"I"},120600:{c:"\\E009",f:"A"},120601:{c:"\\3D5",f:"I"},120602:{c:"\\3F1",f:"I"},120603:{c:"\\3D6",f:"I"},120604:{c:"A",f:"BI"},120605:{c:"B",f:"BI"},120606:{c:"\\393",f:"BI"},120607:{c:"\\394",f:"BI"},120608:{c:"E",f:"BI"},120609:{c:"Z",f:"BI"},120610:{c:"H",f:"BI"},120611:{c:"\\398",f:"BI"},120612:{c:"I",f:"BI"},120613:{c:"K",f:"BI"},120614:{c:"\\39B",f:"BI"},120615:{c:"M",f:"BI"},120616:{c:"N",f:"BI"},120617:{c:"\\39E",f:"BI"},120618:{c:"O",f:"BI"},120619:{c:"\\3A0",f:"BI"},120620:{c:"P",f:"BI"},120621:{c:"\\398
",f:"BI"},120622:{c:"\\3A3",f:"BI"},120623:{c:"T",f:"BI"},120624:{c:"\\3A5",f:"BI"},120625:{c:"\\3A6",f:"BI"},120626:{c:"X",f:"BI"},120627:{c:"\\3A8",f:"BI"},120628:{c:"\\3A9",f:"BI"},120629:{c:"\\2207",f:""},120630:{c:"\\3B1",f:"BI"},120631:{c:"\\3B2",f:"BI"},120632:{c:"\\3B3",f:"BI"},120633:{c:"\\3B4",f:"BI"},120634:{c:"\\3B5",f:"BI"},120635:{c:"\\3B6",f:"BI"},120636:{c:"\\3B7",f:"BI"},120637:{c:"\\3B8",f:"BI"},120638:{c:"\\3B9",f:"BI"},120639:{c:"\\3BA",f:"BI"},120640:{c:"\\3BB",f:"BI"},120641:{c:"\\3BC",f:"BI"},120642:{c:"\\3BD",f:"BI"},120643:{c:"\\3BE",f:"BI"},120644:{c:"\\3BF",f:"BI"},120645:{c:"\\3C0",f:"BI"},120646:{c:"\\3C1",f:"BI"},120647:{c:"\\3C2",f:"BI"},120648:{c:"\\3C3",f:"BI"},120649:{c:"\\3C4",f:"BI"},120650:{c:"\\3C5",f:"BI"},120651:{c:"\\3C6",f:"BI"},120652:{c:"\\3C7",f:"BI"},120653:{c:"\\3C8",f:"BI"},120654:{c:"\\3C9",f:"BI"},120655:{c:"\\2202",f:""},120656:{c:"\\3F5",f:"BI"},120657:{c:"\\3D1",f:"BI"},120658:{c:"\\E009",f:"A"},120659:{c:"\\3D5",f:"BI"},120660:{c:"\\3F1",f:"BI"},120661:{c:"\\3D6",f:"BI"},120662:{c:"A",f:"SS-B"},120663:{c:"B",f:"SS-B"},120664:{c:"\\393",f:"SS-B"},120665:{c:"\\394",f:"SS-B"},120666:{c:"E",f:"SS-B"},120667:{c:"Z",f:"SS-B"},120668:{c:"H",f:"SS-B"},120669:{c:"\\398",f:"SS-B"},120670:{c:"I",f:"SS-B"},120671:{c:"K",f:"SS-B"},120672:{c:"\\39B",f:"SS-B"},120673:{c:"M",f:"SS-B"},120674:{c:"N",f:"SS-B"},120675:{c:"\\39E",f:"SS-B"},120676:{c:"O",f:"SS-B"},120677:{c:"\\3A0",f:"SS-B"},120678:{c:"P",f:"SS-B"},120679:{c:"\\398",f:"SS-B"},120680:{c:"\\3A3",f:"SS-B"},120681:{c:"T",f:"SS-B"},120682:{c:"\\3A5",f:"SS-B"},120683:{c:"\\3A6",f:"SS-B"},120684:{c:"X",f:"SS-B"},120685:{c:"\\3A8",f:"SS-B"},120686:{c:"\\3A9",f:"SS-B"},120687:{c:"\\2207",f:""},120688:{c:"\\3B1",f:"BI"},120689:{c:"\\3B2",f:"BI"},120690:{c:"\\3B3",f:"BI"},120691:{c:"\\3B4",f:"BI"},120692:{c:"\\3B5",f:"BI"},120693:{c:"\\3B6",f:"BI"},120694:{c:"\\3B7",f:"BI"},120695:{c:"\\3B8",f:"BI"},120696:{c:"\\3B9",f:"BI"},120697:{c:"\\3BA",f:"BI"},120698:{c:"\\3BB",f:"BI"},120699:{c:"\\3BC",f:"BI"},120700:{c:"\\3BD",f:"BI"},120701:{c:"\\3BE",f:"BI"},120702:{c:"\\3BF",f:"BI"},120703:{c:"\\3C0",f:"BI"},120704:{c:"\\3C1",f:"BI"},120705:{c:"\\3C2",f:"BI"},120706:{c:"\\3C3",f:"BI"},120707:{c:"\\3C4",f:"BI"},120708:{c:"\\3C5",f:"BI"},120709:{c:"\\3C6",f:"BI"},120710:{c:"\\3C7",f:"BI"},120711:{c:"\\3C8",f:"BI"},120712:{c:"\\3C9",f:"BI"},120713:{c:"\\2202",f:""},120714:{c:"\\3F5",f:"BI"},120715:{c:"\\3D1",f:"BI"},120716:{c:"\\E009",f:"A"},120717:{c:"\\3D5",f:"BI"},120718:{c:"\\3F1",f:"BI"},120719:{c:"\\3D6",f:"BI"},120720:{c:"A",f:"SS-I"},120721:{c:"B",f:"SS-I"},120722:{c:"\\393",f:"SS-I"},120723:{c:"\\394",f:"SS-I"},120724:{c:"E",f:"SS-I"},120725:{c:"Z",f:"SS-I"},120726:{c:"H",f:"SS-I"},120727:{c:"\\398",f:"SS-I"},120728:{c:"I",f:"SS-I"},120729:{c:"K",f:"SS-I"},120730:{c:"\\39B",f:"SS-I"},120731:{c:"M",f:"SS-I"},120732:{c:"N",f:"SS-I"},120733:{c:"\\39E",f:"SS-I"},120734:{c:"O",f:"SS-I"},120735:{c:"\\3A0",f:"SS-I"},120736:{c:"P",f:"SS-I"},120737:{c:"\\398",f:"SS-I"},120738:{c:"\\3A3",f:"SS-I"},120739:{c:"T",f:"SS-I"},120740:{c:"\\3A5",f:"SS-I"},120741:{c:"\\3A6",f:"SS-I"},120742:{c:"X",f:"SS-I"},120743:{c:"\\3A8",f:"SS-I"},120744:{c:"\\3A9",f:"SS-I"},120745:{c:"\\2207",f:""},120746:{c:"\\3B1",f:"BI"},120747:{c:"\\3B2",f:"BI"},120748:{c:"\\3B3",f:"BI"},120749:{c:"\\3B4",f:"BI"},120750:{c:"\\3B5",f:"BI"},120751:{c:"\\3B6",f:"BI"},120752:{c:"\\3B7",f:"BI"},120753:{c:"\\3B8",f:"BI"},120754:{c:"\\3B9",f:"BI"},120755:{c:"\\3BA",f:"BI"},120756:{c:"\\3BB",f:"BI"},120757:{c:"\\3BC",f:"BI"},120758:{c:"\\3BD",f:"BI"},
120759:{c:"\\3BE",f:"BI"},120760:{c:"\\3BF",f:"BI"},120761:{c:"\\3C0",f:"BI"},120762:{c:"\\3C1",f:"BI"},120763:{c:"\\3C2",f:"BI"},120764:{c:"\\3C3",f:"BI"},120765:{c:"\\3C4",f:"BI"},120766:{c:"\\3C5",f:"BI"},120767:{c:"\\3C6",f:"BI"},120768:{c:"\\3C7",f:"BI"},120769:{c:"\\3C8",f:"BI"},120770:{c:"\\3C9",f:"BI"},120771:{c:"\\2202",f:""},120772:{c:"\\3F5",f:"BI"},120773:{c:"\\3D1",f:"BI"},120774:{c:"\\E009",f:"A"},120775:{c:"\\3D5",f:"BI"},120776:{c:"\\3F1",f:"BI"},120777:{c:"\\3D6",f:"BI"},120778:{c:"F",f:"I"},120779:{c:"\\3DD",f:"A"},120782:{c:"0",f:"B"},120783:{c:"1",f:"B"},120784:{c:"2",f:"B"},120785:{c:"3",f:"B"},120786:{c:"4",f:"B"},120787:{c:"5",f:"B"},120788:{c:"6",f:"B"},120789:{c:"7",f:"B"},120790:{c:"8",f:"B"},120791:{c:"9",f:"B"},120792:{c:"0",f:"B"},120793:{c:"1",f:"B"},120794:{c:"2",f:"B"},120795:{c:"3",f:"B"},120796:{c:"4",f:"B"},120797:{c:"5",f:"B"},120798:{c:"6",f:"B"},120799:{c:"7",f:"B"},120800:{c:"8",f:"B"},120801:{c:"9",f:"B"},120802:{c:"0",f:"SS"},120803:{c:"1",f:"SS"},120804:{c:"2",f:"SS"},120805:{c:"3",f:"SS"},120806:{c:"4",f:"SS"},120807:{c:"5",f:"SS"},120808:{c:"6",f:"SS"},120809:{c:"7",f:"SS"},120810:{c:"8",f:"SS"},120811:{c:"9",f:"SS"},120812:{c:"0",f:"SS-B"},120813:{c:"1",f:"SS-B"},120814:{c:"2",f:"SS-B"},120815:{c:"3",f:"SS-B"},120816:{c:"4",f:"SS-B"},120817:{c:"5",f:"SS-B"},120818:{c:"6",f:"SS-B"},120819:{c:"7",f:"SS-B"},120820:{c:"8",f:"SS-B"},120821:{c:"9",f:"SS-B"},120822:{c:"0",f:"T"},120823:{c:"1",f:"T"},120824:{c:"2",f:"T"},120825:{c:"3",f:"T"},120826:{c:"4",f:"T"},120827:{c:"5",f:"T"},120828:{c:"6",f:"T"},120829:{c:"7",f:"T"},120830:{c:"8",f:"T"},120831:{c:"9",f:"T"}})},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.normal={32:[0,0,.25],33:[.716,0,.278],34:[.694,-.379,.5],35:[.694,.194,.833],36:[.75,.056,.5],37:[.75,.056,.833],38:[.716,.022,.778],39:[.694,-.379,.278],40:[.75,.25,.389],41:[.75,.25,.389],42:[.75,-.32,.5],43:[.583,.082,.778],44:[.121,.194,.278],45:[.252,-.179,.333],46:[.12,0,.278],47:[.75,.25,.5],48:[.666,.022,.5],49:[.666,0,.5],50:[.666,0,.5],51:[.665,.022,.5],52:[.677,0,.5],53:[.666,.022,.5],54:[.666,.022,.5],55:[.676,.022,.5],56:[.666,.022,.5],57:[.666,.022,.5],58:[.43,0,.278],59:[.43,.194,.278],60:[.54,.04,.778],61:[.583,.082,.778],62:[.54,.04,.778],63:[.705,0,.472],64:[.705,.011,.778],65:[.716,0,.75],66:[.683,0,.708],67:[.705,.021,.722],68:[.683,0,.764],69:[.68,0,.681],70:[.68,0,.653],71:[.705,.022,.785],72:[.683,0,.75],73:[.683,0,.361],74:[.683,.022,.514],75:[.683,0,.778],76:[.683,0,.625],77:[.683,0,.917],78:[.683,0,.75],79:[.705,.022,.778],80:[.683,0,.681],81:[.705,.193,.778],82:[.683,.022,.736],83:[.705,.022,.556],84:[.677,0,.722],85:[.683,.022,.75],86:[.683,.022,.75],87:[.683,.022,1.028],88:[.683,0,.75],89:[.683,0,.75],90:[.683,0,.611],91:[.75,.25,.278],92:[.75,.25,.5],93:[.75,.25,.278],94:[.694,-.531,.5],95:[-.025,.062,.5],96:[.699,-.505,.5],97:[.448,.011,.5],98:[.694,.011,.556],99:[.448,.011,.444],100:[.694,.011,.556],101:[.448,.011,.444],102:[.705,0,.306,{ic:.066}],103:[.453,.206,.5],104:[.694,0,.556],105:[.669,0,.278],106:[.669,.205,.306],107:[.694,0,.528],108:[.694,0,.278],109:[.442,0,.833],110:[.442,0,.556],111:[.448,.01,.5],112:[.442,.194,.556],113:[.442,.194,.528],114:[.442,0,.392],115:[.448,.011,.394],116:[.615,.01,.389],117:[.442,.011,.556],118:[.431,.011,.528],119:[.431,.011,.722],120:[.431,0,.528],121:[.431,.204,.528],122:[.431,0,.444],123:[.75,.25,.5],124:[.75,.249,.278],125:[.75,.25,.5],126:[.318,-.215,.5],160:[0,0,.25],163:[.714,.011,.769],165:[.683,0,.75],168:[.669,-.554,.5],172:[.356,
-.089,.667],174:[.709,.175,.947],175:[.59,-.544,.5],176:[.715,-.542,.5],177:[.666,0,.778],180:[.699,-.505,.5],183:[.31,-.19,.278],215:[.491,-.009,.778],240:[.749,.021,.556],247:[.537,.036,.778],295:[.695,.013,.54],305:[.442,0,.278,{sk:.0278}],567:[.442,.205,.306,{sk:.0833}],697:[.56,-.043,.275],710:[.694,-.531,.5],711:[.644,-.513,.5],713:[.59,-.544,.5],714:[.699,-.505,.5],715:[.699,-.505,.5],728:[.694,-.515,.5],729:[.669,-.549,.5],730:[.715,-.542,.5],732:[.668,-.565,.5],768:[.699,-.505,0],769:[.699,-.505,0],770:[.694,-.531,0],771:[.668,-.565,0],772:[.59,-.544,0],774:[.694,-.515,0],775:[.669,-.549,0],776:[.669,-.554,0],778:[.715,-.542,0],779:[.701,-.51,0],780:[.644,-.513,0],824:[.716,.215,0],913:[.716,0,.75],914:[.683,0,.708],915:[.68,0,.625],916:[.716,0,.833],917:[.68,0,.681],918:[.683,0,.611],919:[.683,0,.75],920:[.705,.022,.778],921:[.683,0,.361],922:[.683,0,.778],923:[.716,0,.694],924:[.683,0,.917],925:[.683,0,.75],926:[.677,0,.667],927:[.705,.022,.778],928:[.68,0,.75],929:[.683,0,.681],930:[.705,.022,.778],931:[.683,0,.722],932:[.677,0,.722],933:[.705,0,.778],934:[.683,0,.722],935:[.683,0,.75],936:[.683,0,.778],937:[.704,0,.722],978:[.705,0,.778],988:[.68,0,.653],989:[.605,.085,.778],1008:[.434,.006,.667,{ic:.067}],1014:[.44,0,.429],8192:[0,0,.5],8193:[0,0,1],8194:[0,0,.5],8195:[0,0,1],8196:[0,0,.333],8197:[0,0,.25],8198:[0,0,.167],8201:[0,0,.167],8202:[0,0,.1],8203:[0,0,0],8204:[0,0,0],8211:[.285,-.248,.5],8212:[.285,-.248,1],8213:[.285,-.248,1],8214:[.75,.25,.5],8215:[-.025,.062,.5],8216:[.694,-.379,.278],8217:[.694,-.379,.278],8220:[.694,-.379,.5],8221:[.694,-.379,.5],8224:[.705,.216,.444],8225:[.705,.205,.444],8226:[.444,-.055,.5],8230:[.12,0,1.172],8242:[.56,-.043,.275],8243:[.56,0,.55],8244:[.56,0,.825],8245:[.56,-.043,.275],8246:[.56,0,.55],8247:[.56,0,.825],8254:[.59,-.544,.5],8260:[.75,.25,.5],8279:[.56,0,1.1],8289:[0,0,0],8290:[0,0,0],8291:[0,0,0],8292:[0,0,0],8407:[.714,-.516,.5],8450:[.702,.019,.722],8459:[.717,.036,.969,{ic:.272,sk:.333}],8460:[.666,.133,.72],8461:[.683,0,.778],8462:[.694,.011,.576,{sk:-.0278}],8463:[.695,.013,.54],8464:[.717,.314,1.052,{ic:.081,sk:.417}],8465:[.686,.026,.554],8466:[.717,.017,.874,{ic:.161,sk:.306}],8467:[.705,.02,.417,{sk:.111}],8469:[.683,.02,.722],8472:[.453,.216,.636,{sk:.111}],8473:[.683,0,.611],8474:[.701,.181,.778],8475:[.717,.017,.85,{sk:.194}],8476:[.686,.026,.828],8477:[.683,0,.722],8484:[.683,0,.667],8486:[.704,0,.722],8487:[.684,.022,.722],8488:[.729,.139,.602],8492:[.708,.028,.908,{sk:.194}],8493:[.685,.024,.613],8496:[.707,.008,.562,{ic:.156,sk:.139}],8497:[.735,.036,.895,{ic:.095,sk:.222}],8498:[.695,0,.556],8499:[.721,.05,1.08,{ic:.136,sk:.444}],8501:[.694,0,.611],8502:[.763,.021,.667],8503:[.764,.043,.444],8504:[.764,.043,.667],8513:[.705,.023,.639],8592:[.511,.011,1],8593:[.694,.193,.5],8594:[.511,.011,1],8595:[.694,.194,.5],8596:[.511,.011,1],8597:[.772,.272,.5],8598:[.72,.195,1],8599:[.72,.195,1],8600:[.695,.22,1],8601:[.695,.22,1],8602:[.437,-.06,1],8603:[.437,-.06,1],8606:[.417,-.083,1],8608:[.417,-.083,1],8610:[.417,-.083,1.111],8611:[.417,-.083,1.111],8614:[.511,.011,1],8617:[.511,.011,1.126],8618:[.511,.011,1.126],8619:[.575,.041,1],8620:[.575,.041,1],8621:[.417,-.083,1.389],8622:[.437,-.06,1],8624:[.722,0,.5],8625:[.722,0,.5],8630:[.461,0,1],8631:[.46,0,1],8634:[.65,.083,.778],8635:[.65,.083,.778],8636:[.511,-.23,1],8637:[.27,.011,1],8638:[.694,.194,.417],8639:[.694,.194,.417],8640:[.511,-.23,1],8641:[.27,.011,1],8642:[.694,.194,.417],8643:[.694,.194,.417],8644:[.667,0,1],8646:[.667,0,1],8647:[.583,.083,1],8648:[.6
94,.193,.833],8649:[.583,.083,1],8650:[.694,.194,.833],8651:[.514,.014,1],8652:[.671,.011,1],8653:[.534,.035,1],8654:[.534,.037,1],8655:[.534,.035,1],8656:[.525,.024,1],8657:[.694,.194,.611],8658:[.525,.024,1],8659:[.694,.194,.611],8660:[.526,.025,1],8661:[.772,.272,.611],8666:[.611,.111,1],8667:[.611,.111,1],8669:[.417,-.083,1],8672:[.437,-.064,1.334],8674:[.437,-.064,1.334],8704:[.694,.022,.556],8705:[.846,.021,.5],8706:[.715,.022,.531,{sk:.0833}],8707:[.694,0,.556],8708:[.716,.215,.556],8709:[.772,.078,.5],8710:[.716,0,.833],8711:[.683,.033,.833],8712:[.54,.04,.667],8713:[.716,.215,.667],8715:[.54,.04,.667],8716:[.716,.215,.667],8717:[.44,0,.429],8719:[.75,.25,.944],8720:[.75,.25,.944],8721:[.75,.25,1.056],8722:[.583,.082,.778],8723:[.5,.166,.778],8724:[.766,.093,.778],8725:[.75,.25,.5],8726:[.43,.023,.778],8727:[.465,-.035,.5],8728:[.444,-.055,.5],8729:[.444,-.055,.5],8730:[.8,.2,.833],8733:[.442,.011,.778],8734:[.442,.011,1],8736:[.694,0,.722],8737:[.714,.02,.722],8738:[.551,.051,.722],8739:[.75,.249,.278],8740:[.75,.252,.278],8741:[.75,.25,.5],8742:[.75,.25,.5],8743:[.598,.022,.667],8744:[.598,.022,.667],8745:[.598,.022,.667],8746:[.598,.022,.667],8747:[.716,.216,.417,{ic:.055}],8748:[.805,.306,.819,{ic:.138}],8749:[.805,.306,1.166,{ic:.138}],8750:[.805,.306,.472,{ic:.138}],8756:[.471,.082,.667],8757:[.471,.082,.667],8764:[.367,-.133,.778],8765:[.367,-.133,.778],8768:[.583,.083,.278],8769:[.467,-.032,.778],8770:[.463,-.034,.778],8771:[.464,-.036,.778],8772:[.716,.215,.778],8773:[.589,-.022,.778],8774:[.652,.155,.778],8775:[.652,.155,.778],8776:[.483,-.055,.778],8777:[.716,.215,.778],8778:[.579,.039,.778],8781:[.484,-.016,.778],8782:[.492,-.008,.778],8783:[.492,-.133,.778],8784:[.67,-.133,.778],8785:[.609,.108,.778],8786:[.601,.101,.778],8787:[.601,.102,.778],8790:[.367,-.133,.778],8791:[.721,-.133,.778],8796:[.859,-.133,.778],8800:[.716,.215,.778],8801:[.464,-.036,.778],8802:[.716,.215,.778],8804:[.636,.138,.778],8805:[.636,.138,.778],8806:[.753,.175,.778],8807:[.753,.175,.778],8808:[.752,.286,.778],8809:[.752,.286,.778],8810:[.568,.067,1],8811:[.567,.067,1],8812:[.75,.25,.5],8813:[.716,.215,.778],8814:[.708,.209,.778],8815:[.708,.209,.778],8816:[.801,.303,.778],8817:[.801,.303,.778],8818:[.732,.228,.778],8819:[.732,.228,.778],8820:[.732,.228,.778],8821:[.732,.228,.778],8822:[.681,.253,.778],8823:[.681,.253,.778],8824:[.716,.253,.778],8825:[.716,.253,.778],8826:[.539,.041,.778],8827:[.539,.041,.778],8828:[.58,.153,.778],8829:[.58,.154,.778],8830:[.732,.228,.778],8831:[.732,.228,.778],8832:[.705,.208,.778],8833:[.705,.208,.778],8834:[.54,.04,.778],8835:[.54,.04,.778],8836:[.716,.215,.778],8837:[.716,.215,.778],8838:[.636,.138,.778],8839:[.636,.138,.778],8840:[.801,.303,.778],8841:[.801,.303,.778],8842:[.635,.241,.778],8843:[.635,.241,.778],8846:[.598,.022,.667],8847:[.539,.041,.778],8848:[.539,.041,.778],8849:[.636,.138,.778],8850:[.636,.138,.778],8851:[.598,0,.667],8852:[.598,0,.667],8853:[.583,.083,.778],8854:[.583,.083,.778],8855:[.583,.083,.778],8856:[.583,.083,.778],8857:[.583,.083,.778],8858:[.582,.082,.778],8859:[.582,.082,.778],8861:[.582,.082,.778],8862:[.689,0,.778],8863:[.689,0,.778],8864:[.689,0,.778],8865:[.689,0,.778],8866:[.694,0,.611],8867:[.694,0,.611],8868:[.668,0,.778],8869:[.668,0,.778],8872:[.75,.249,.867],8873:[.694,0,.722],8874:[.694,0,.889],8876:[.695,0,.611],8877:[.695,0,.611],8878:[.695,0,.722],8879:[.695,0,.722],8882:[.539,.041,.778],8883:[.539,.041,.778],8884:[.636,.138,.778],8885:[.636,.138,.778],8888:[.408,-.092,1.111],8890:[.431,.212,.556],8891:[.716,0,.6
11],8892:[.716,0,.611],8896:[.75,.249,.833],8897:[.75,.249,.833],8898:[.75,.249,.833],8899:[.75,.249,.833],8900:[.488,-.012,.5],8901:[.31,-.19,.278],8902:[.486,-.016,.5],8903:[.545,.044,.778],8904:[.505,.005,.9],8905:[.492,-.008,.778],8906:[.492,-.008,.778],8907:[.694,.022,.778],8908:[.694,.022,.778],8909:[.464,-.036,.778],8910:[.578,.021,.76],8911:[.578,.022,.76],8912:[.54,.04,.778],8913:[.54,.04,.778],8914:[.598,.022,.667],8915:[.598,.022,.667],8916:[.736,.022,.667],8918:[.541,.041,.778],8919:[.541,.041,.778],8920:[.568,.067,1.333],8921:[.568,.067,1.333],8922:[.886,.386,.778],8923:[.886,.386,.778],8926:[.734,0,.778],8927:[.734,0,.778],8928:[.801,.303,.778],8929:[.801,.303,.778],8930:[.716,.215,.778],8931:[.716,.215,.778],8934:[.73,.359,.778],8935:[.73,.359,.778],8936:[.73,.359,.778],8937:[.73,.359,.778],8938:[.706,.208,.778],8939:[.706,.208,.778],8940:[.802,.303,.778],8941:[.801,.303,.778],8942:[1.3,.03,.278],8943:[.31,-.19,1.172],8945:[1.52,-.1,1.282],8965:[.716,0,.611],8966:[.813,.097,.611],8968:[.75,.25,.444],8969:[.75,.25,.444],8970:[.75,.25,.444],8971:[.75,.25,.444],8988:[.694,-.306,.5],8989:[.694,-.306,.5],8990:[.366,.022,.5],8991:[.366,.022,.5],8994:[.388,-.122,1],8995:[.378,-.134,1],9001:[.75,.25,.389],9002:[.75,.25,.389],9136:[.744,.244,.412],9137:[.744,.244,.412],9168:[.602,0,.667],9416:[.709,.175,.902],9484:[.694,-.306,.5],9488:[.694,-.306,.5],9492:[.366,.022,.5],9496:[.366,.022,.5],9585:[.694,.195,.889],9586:[.694,.195,.889],9632:[.689,0,.778],9633:[.689,0,.778],9642:[.689,0,.778],9650:[.575,.02,.722],9651:[.716,0,.889],9652:[.575,.02,.722],9653:[.716,0,.889],9654:[.539,.041,.778],9656:[.539,.041,.778],9657:[.505,.005,.5],9660:[.576,.019,.722],9661:[.5,.215,.889],9662:[.576,.019,.722],9663:[.5,.215,.889],9664:[.539,.041,.778],9666:[.539,.041,.778],9667:[.505,.005,.5],9674:[.716,.132,.667],9711:[.715,.215,1],9723:[.689,0,.778],9724:[.689,0,.778],9733:[.694,.111,.944],9824:[.727,.13,.778],9825:[.716,.033,.778],9826:[.727,.162,.778],9827:[.726,.13,.778],9837:[.75,.022,.389],9838:[.734,.223,.389],9839:[.723,.223,.389],10003:[.706,.034,.833],10016:[.716,.022,.833],10072:[.75,.249,.278],10216:[.75,.25,.389],10217:[.75,.25,.389],10222:[.744,.244,.412],10223:[.744,.244,.412],10229:[.511,.011,1.609],10230:[.511,.011,1.638],10231:[.511,.011,1.859],10232:[.525,.024,1.609],10233:[.525,.024,1.638],10234:[.525,.024,1.858],10236:[.511,.011,1.638],10731:[.716,.132,.667],10744:[.716,.215,.778],10752:[.75,.25,1.111],10753:[.75,.25,1.111],10754:[.75,.25,1.111],10756:[.75,.249,.833],10758:[.75,.249,.833],10764:[.805,.306,1.638,{ic:.138}],10799:[.491,-.009,.778],10815:[.683,0,.75],10846:[.813,.097,.611],10877:[.636,.138,.778],10878:[.636,.138,.778],10885:[.762,.29,.778],10886:[.762,.29,.778],10887:[.635,.241,.778],10888:[.635,.241,.778],10889:[.761,.387,.778],10890:[.761,.387,.778],10891:[1.003,.463,.778],10892:[1.003,.463,.778],10901:[.636,.138,.778],10902:[.636,.138,.778],10927:[.636,.138,.778],10928:[.636,.138,.778],10933:[.752,.286,.778],10934:[.752,.286,.778],10935:[.761,.294,.778],10936:[.761,.294,.778],10937:[.761,.337,.778],10938:[.761,.337,.778],10949:[.753,.215,.778],10950:[.753,.215,.778],10955:[.783,.385,.778],10956:[.783,.385,.778],12296:[.75,.25,.389],12297:[.75,.25,.389],57350:[.43,.023,.222],57351:[.431,.024,.389],57352:[.605,.085,.778],57353:[.434,.006,.667,{ic:.067}],57356:[.752,.284,.778],57357:[.752,.284,.778],57358:[.919,.421,.778],57359:[.801,.303,.778],57360:[.801,.303,.778],57361:[.919,.421,.778],57366:[.828,.33,.778],57367:[.752,.332,.778],57368:[.828,.33,.778],57369:[.752
,.333,.778],57370:[.634,.255,.778],57371:[.634,.254,.778],119808:[.698,0,.869],119809:[.686,0,.818],119810:[.697,.011,.831],119811:[.686,0,.882],119812:[.68,0,.756],119813:[.68,0,.724],119814:[.697,.01,.904],119815:[.686,0,.9],119816:[.686,0,.436],119817:[.686,.011,.594],119818:[.686,0,.901],119819:[.686,0,.692],119820:[.686,0,1.092],119821:[.686,0,.9],119822:[.696,.01,.864],119823:[.686,0,.786],119824:[.696,.193,.864],119825:[.686,.011,.862],119826:[.697,.011,.639],119827:[.675,0,.8],119828:[.686,.011,.885],119829:[.686,.007,.869],119830:[.686,.007,1.189],119831:[.686,0,.869],119832:[.686,0,.869],119833:[.686,0,.703],119834:[.453,.006,.559],119835:[.694,.006,.639],119836:[.453,.006,.511],119837:[.694,.006,.639],119838:[.452,.006,.527],119839:[.7,0,.351,{ic:.101}],119840:[.455,.201,.575],119841:[.694,0,.639],119842:[.695,0,.319],119843:[.695,.2,.351],119844:[.694,0,.607],119845:[.694,0,.319],119846:[.45,0,.958],119847:[.45,0,.639],119848:[.452,.005,.575],119849:[.45,.194,.639],119850:[.45,.194,.607],119851:[.45,0,.474],119852:[.453,.006,.454],119853:[.635,.005,.447],119854:[.45,.006,.639],119855:[.444,0,.607],119856:[.444,0,.831],119857:[.444,0,.607],119858:[.444,.2,.607],119859:[.444,0,.511],119860:[.716,0,.75,{sk:.139}],119861:[.683,0,.759,{sk:.0833}],119862:[.705,.022,.715,{sk:.0833}],119863:[.683,0,.828,{sk:.0556}],119864:[.68,0,.738,{sk:.0833}],119865:[.68,0,.643,{ic:.106,sk:.0833}],119866:[.705,.022,.786,{sk:.0833}],119867:[.683,0,.831,{ic:.057,sk:.0556}],119868:[.683,0,.44,{ic:.064,sk:.111}],119869:[.683,.022,.555,{ic:.078,sk:.167}],119870:[.683,0,.849,{sk:.0556}],119871:[.683,0,.681,{sk:.0278}],119872:[.683,0,.97,{ic:.081,sk:.0833}],119873:[.683,0,.803,{ic:.085,sk:.0833}],119874:[.704,.022,.763,{sk:.0833}],119875:[.683,0,.642,{ic:.109,sk:.0833}],119876:[.704,.194,.791,{sk:.0833}],119877:[.683,.021,.759,{sk:.0833}],119878:[.705,.022,.613,{sk:.0833}],119879:[.677,0,.584,{ic:.12,sk:.0833}],119880:[.683,.022,.683,{ic:.084,sk:.0278}],119881:[.683,.022,.583,{ic:.186}],119882:[.683,.022,.944,{ic:.104}],119883:[.683,0,.828,{sk:.0833}],119884:[.683,0,.581,{ic:.182}],119885:[.683,0,.683,{sk:.0833}],119886:[.441,.01,.529],119887:[.694,.011,.429],119888:[.442,.011,.433,{sk:.0556}],119889:[.694,.01,.52,{sk:.167}],119890:[.442,.011,.466,{sk:.0556}],119891:[.705,.205,.49,{ic:.06,sk:.167}],119892:[.442,.205,.477,{sk:.0278}],119893:[.694,.011,.576,{sk:-.0278}],119894:[.661,.011,.345],119895:[.661,.204,.412],119896:[.694,.011,.521],119897:[.694,.011,.298,{sk:.0833}],119898:[.442,.011,.878],119899:[.442,.011,.6],119900:[.441,.011,.485,{sk:.0556}],119901:[.442,.194,.503,{sk:.0833}],119902:[.442,.194,.446,{sk:.0833}],119903:[.442,.011,.451,{sk:.0556}],119904:[.442,.01,.469,{sk:.0556}],119905:[.626,.011,.361,{sk:.0833}],119906:[.442,.011,.572,{sk:.0278}],119907:[.443,.011,.485,{sk:.0278}],119908:[.443,.011,.716,{sk:.0833}],119909:[.442,.011,.572,{sk:.0278}],119910:[.442,.205,.49,{sk:.0556}],119911:[.442,.011,.465,{sk:.0556}],119912:[.711,0,.869,{sk:.16}],119913:[.686,0,.866,{sk:.0958}],119914:[.703,.017,.817,{sk:.0958}],119915:[.686,0,.938,{sk:.0639}],119916:[.68,0,.81,{sk:.0958}],119917:[.68,0,.689,{ic:.12,sk:.0958}],119918:[.703,.016,.887,{sk:.0958}],119919:[.686,0,.982,{sk:.0639}],119920:[.686,0,.511,{ic:.062,sk:.128}],119921:[.686,.017,.631,{ic:.063,sk:.192}],119922:[.686,0,.971,{sk:.0639}],119923:[.686,0,.756,{sk:.0319}],119924:[.686,0,1.142,{ic:.077,sk:.0958}],119925:[.686,0,.95,{ic:.077,sk:.0958}],119926:[.703,.017,.837,{sk:.0958}],119927:[.686,0,.723,{ic:.124,sk:.0958}],119928:[.703,.194,.869,{sk
:.0958}],119929:[.686,.017,.872,{sk:.0958}],119930:[.703,.017,.693,{sk:.0958}],119931:[.675,0,.637,{ic:.135,sk:.0958}],119932:[.686,.016,.8,{ic:.077,sk:.0319}],119933:[.686,.016,.678,{ic:.208}],119934:[.686,.017,1.093,{ic:.114}],119935:[.686,0,.947,{sk:.0958}],119936:[.686,0,.675,{ic:.201}],119937:[.686,0,.773,{sk:.0958}],119938:[.452,.008,.633],119939:[.694,.008,.521],119940:[.451,.008,.513,{sk:.0639}],119941:[.694,.008,.61,{sk:.192}],119942:[.452,.008,.554,{sk:.0639}],119943:[.701,.201,.568,{ic:.056,sk:.192}],119944:[.452,.202,.545,{sk:.0319}],119945:[.694,.008,.668,{sk:-.0319}],119946:[.694,.008,.405],119947:[.694,.202,.471],119948:[.694,.008,.604],119949:[.694,.008,.348,{sk:.0958}],119950:[.452,.008,1.032],119951:[.452,.008,.713],119952:[.452,.008,.585,{sk:.0639}],119953:[.452,.194,.601,{sk:.0958}],119954:[.452,.194,.542,{sk:.0958}],119955:[.452,.008,.529,{sk:.0639}],119956:[.451,.008,.531,{sk:.0639}],119957:[.643,.007,.415,{sk:.0958}],119958:[.452,.008,.681,{sk:.0319}],119959:[.453,.008,.567,{sk:.0319}],119960:[.453,.008,.831,{sk:.0958}],119961:[.452,.008,.659,{sk:.0319}],119962:[.452,.202,.59,{sk:.0639}],119963:[.452,.008,.555,{sk:.0639}],119964:[.717,.008,.803,{ic:.213,sk:.389}],119965:[.708,.028,.908,{sk:.194}],119966:[.728,.026,.666,{ic:.153,sk:.278}],119967:[.708,.031,.774,{ic:.081,sk:.111}],119968:[.707,.008,.562,{ic:.156,sk:.139}],119969:[.735,.036,.895,{ic:.095,sk:.222}],119970:[.717,.037,.61,{ic:.128,sk:.25}],119971:[.717,.036,.969,{ic:.272,sk:.333}],119972:[.717,.017,.809,{ic:.137,sk:.333}],119973:[.717,.314,1.052,{ic:.081,sk:.417}],119974:[.717,.037,.914,{ic:.29,sk:.361}],119975:[.717,.017,.874,{ic:.161,sk:.306}],119976:[.721,.05,1.08,{ic:.136,sk:.444}],119977:[.726,.036,.902,{ic:.306,sk:.389}],119978:[.707,.008,.738,{ic:.067,sk:.167}],119979:[.716,.037,1.013,{sk:.222}],119980:[.717,.017,.883,{sk:.278}],119981:[.717,.017,.85,{sk:.194}],119982:[.708,.036,.868,{ic:.148,sk:.333}],119983:[.735,.037,.747,{ic:.249,sk:.222}],119984:[.717,.017,.8,{ic:.16,sk:.25}],119985:[.717,.017,.622,{ic:.228,sk:.222}],119986:[.717,.017,.805,{ic:.221,sk:.25}],119987:[.717,.017,.944,{ic:.187,sk:.278}],119988:[.716,.017,.71,{ic:.249,sk:.194}],119989:[.717,.016,.821,{ic:.211,sk:.306}],119990:[.441,.01,.529],119991:[.694,.011,.429],119992:[.442,.011,.433,{sk:.0556}],119993:[.694,.01,.52,{sk:.167}],119994:[.442,.011,.466,{sk:.0556}],119995:[.705,.205,.49,{ic:.06,sk:.167}],119996:[.442,.205,.477,{sk:.0278}],119997:[.694,.011,.576,{sk:-.0278}],119998:[.661,.011,.345],119999:[.661,.204,.412],12e4:[.694,.011,.521],120001:[.694,.011,.298,{sk:.0833}],120002:[.442,.011,.878],120003:[.442,.011,.6],120004:[.441,.011,.485,{sk:.0556}],120005:[.442,.194,.503,{sk:.0833}],120006:[.442,.194,.446,{sk:.0833}],120007:[.442,.011,.451,{sk:.0556}],120008:[.442,.01,.469,{sk:.0556}],120009:[.626,.011,.361,{sk:.0833}],120010:[.442,.011,.572,{sk:.0278}],120011:[.443,.011,.485,{sk:.0278}],120012:[.443,.011,.716,{sk:.0833}],120013:[.442,.011,.572,{sk:.0278}],120014:[.442,.205,.49,{sk:.0556}],120015:[.442,.011,.465,{sk:.0556}],120016:[.717,.008,.803,{ic:.213,sk:.389}],120017:[.708,.028,.908,{sk:.194}],120018:[.728,.026,.666,{ic:.153,sk:.278}],120019:[.708,.031,.774,{ic:.081,sk:.111}],120020:[.707,.008,.562,{ic:.156,sk:.139}],120021:[.735,.036,.895,{ic:.095,sk:.222}],120022:[.717,.037,.61,{ic:.128,sk:.25}],120023:[.717,.036,.969,{ic:.272,sk:.333}],120024:[.717,.017,.809,{ic:.137,sk:.333}],120025:[.717,.314,1.052,{ic:.081,sk:.417}],120026:[.717,.037,.914,{ic:.29,sk:.361}],120027:[.717,.017,.874,{ic:.161,sk:.306}],120028:[.721,.05,1
.08,{ic:.136,sk:.444}],120029:[.726,.036,.902,{ic:.306,sk:.389}],120030:[.707,.008,.738,{ic:.067,sk:.167}],120031:[.716,.037,1.013,{sk:.222}],120032:[.717,.017,.883,{sk:.278}],120033:[.717,.017,.85,{sk:.194}],120034:[.708,.036,.868,{ic:.148,sk:.333}],120035:[.735,.037,.747,{ic:.249,sk:.222}],120036:[.717,.017,.8,{ic:.16,sk:.25}],120037:[.717,.017,.622,{ic:.228,sk:.222}],120038:[.717,.017,.805,{ic:.221,sk:.25}],120039:[.717,.017,.944,{ic:.187,sk:.278}],120040:[.716,.017,.71,{ic:.249,sk:.194}],120041:[.717,.016,.821,{ic:.211,sk:.306}],120042:[.452,.008,.633],120043:[.694,.008,.521],120044:[.451,.008,.513,{sk:.0639}],120045:[.694,.008,.61,{sk:.192}],120046:[.452,.008,.554,{sk:.0639}],120047:[.701,.201,.568,{ic:.056,sk:.192}],120048:[.452,.202,.545,{sk:.0319}],120049:[.694,.008,.668,{sk:-.0319}],120050:[.694,.008,.405],120051:[.694,.202,.471],120052:[.694,.008,.604],120053:[.694,.008,.348,{sk:.0958}],120054:[.452,.008,1.032],120055:[.452,.008,.713],120056:[.452,.008,.585,{sk:.0639}],120057:[.452,.194,.601,{sk:.0958}],120058:[.452,.194,.542,{sk:.0958}],120059:[.452,.008,.529,{sk:.0639}],120060:[.451,.008,.531,{sk:.0639}],120061:[.643,.007,.415,{sk:.0958}],120062:[.452,.008,.681,{sk:.0319}],120063:[.453,.008,.567,{sk:.0319}],120064:[.453,.008,.831,{sk:.0958}],120065:[.452,.008,.659,{sk:.0319}],120066:[.452,.202,.59,{sk:.0639}],120067:[.452,.008,.555,{sk:.0639}],120068:[.696,.026,.718],120069:[.691,.027,.884],120070:[.685,.024,.613],120071:[.685,.027,.832],120072:[.685,.024,.663],120073:[.686,.153,.611],120074:[.69,.026,.785],120075:[.666,.133,.72],120076:[.686,.026,.554],120077:[.686,.139,.552],120078:[.68,.027,.668],120079:[.686,.026,.666],120080:[.692,.027,1.05],120081:[.686,.025,.832],120082:[.729,.027,.827],120083:[.692,.218,.828],120084:[.729,.069,.827],120085:[.686,.026,.828],120086:[.692,.027,.829],120087:[.701,.027,.669],120088:[.697,.027,.646],120089:[.686,.026,.831],120090:[.686,.027,1.046],120091:[.688,.027,.719],120092:[.686,.218,.833],120093:[.729,.139,.602],120094:[.47,.035,.5],120095:[.685,.031,.513],120096:[.466,.029,.389],120097:[.609,.033,.499],120098:[.467,.03,.401],120099:[.681,.221,.326],120100:[.47,.209,.504],120101:[.688,.205,.521],120102:[.673,.02,.279],120103:[.672,.208,.281],120104:[.689,.025,.389],120105:[.685,.02,.28],120106:[.475,.026,.767],120107:[.475,.022,.527],120108:[.48,.028,.489],120109:[.541,.212,.5],120110:[.479,.219,.489],120111:[.474,.021,.389],120112:[.478,.029,.443],120113:[.64,.02,.333],120114:[.474,.023,.517],120115:[.53,.028,.512],120116:[.532,.028,.774],120117:[.472,.188,.389],120118:[.528,.218,.499],120119:[.471,.214,.391],120120:[.701,0,.722],120121:[.683,0,.667],120122:[.702,.019,.722],120123:[.683,0,.722],120124:[.683,0,.667],120125:[.683,0,.611],120126:[.702,.019,.778],120127:[.683,0,.778],120128:[.683,0,.389],120129:[.683,.077,.5],120130:[.683,0,.778],120131:[.683,0,.667],120132:[.683,0,.944],120133:[.683,.02,.722],120134:[.701,.019,.778],120135:[.683,0,.611],120136:[.701,.181,.778],120137:[.683,0,.722],120138:[.702,.012,.556],120139:[.683,0,.667],120140:[.683,.019,.722],120141:[.683,.02,.722],120142:[.683,.019,1],120143:[.683,0,.722],120144:[.683,0,.722],120145:[.683,0,.667],120146:[.453,.006,.559],120147:[.694,.006,.639],120148:[.453,.006,.511],120149:[.694,.006,.639],120150:[.452,.006,.527],120151:[.7,0,.351,{ic:.101}],120152:[.455,.201,.575],120153:[.694,0,.639],120154:[.695,0,.319],120155:[.695,.2,.351],120156:[.683,0,.556],120157:[.694,0,.319],120158:[.45,0,.958],120159:[.45,0,.639],120160:[.452,.005,.575],120161:[.45,.194,.639],120162:[.4
5,.194,.607],120163:[.45,0,.474],120164:[.453,.006,.454],120165:[.635,.005,.447],120166:[.45,.006,.639],120167:[.444,0,.607],120168:[.444,0,.831],120169:[.444,0,.607],120170:[.444,.2,.607],120171:[.444,0,.511],120172:[.686,.031,.847],120173:[.684,.031,1.044],120174:[.676,.032,.723],120175:[.683,.029,.982],120176:[.686,.029,.783],120177:[.684,.146,.722],120178:[.687,.029,.927],120179:[.683,.126,.851],120180:[.681,.025,.655],120181:[.68,.141,.652],120182:[.681,.026,.789],120183:[.683,.028,.786],120184:[.683,.032,1.239],120185:[.679,.03,.983],120186:[.726,.03,.976],120187:[.688,.223,.977],120188:[.726,.083,.976],120189:[.688,.028,.978],120190:[.685,.031,.978],120191:[.686,.03,.79],120192:[.688,.039,.851],120193:[.685,.029,.982],120194:[.683,.03,1.235],120195:[.681,.035,.849],120196:[.688,.214,.984],120197:[.677,.148,.711],120198:[.472,.032,.603],120199:[.69,.032,.59],120200:[.473,.026,.464],120201:[.632,.028,.589],120202:[.471,.027,.472],120203:[.687,.222,.388],120204:[.472,.208,.595],120205:[.687,.207,.615],120206:[.686,.025,.331],120207:[.682,.203,.332],120208:[.682,.025,.464],120209:[.681,.024,.337],120210:[.476,.031,.921],120211:[.473,.028,.654],120212:[.482,.034,.609],120213:[.557,.207,.604],120214:[.485,.211,.596],120215:[.472,.026,.46],120216:[.479,.034,.523],120217:[.648,.027,.393],120218:[.472,.032,.589],120219:[.546,.027,.604],120220:[.549,.032,.918],120221:[.471,.188,.459],120222:[.557,.221,.589],120223:[.471,.214,.461],120224:[.694,0,.667],120225:[.694,0,.667],120226:[.705,.011,.639],120227:[.694,0,.722],120228:[.691,0,.597],120229:[.691,0,.569],120230:[.704,.011,.667],120231:[.694,0,.708],120232:[.694,0,.278],120233:[.694,.022,.472],120234:[.694,0,.694],120235:[.694,0,.542],120236:[.694,0,.875],120237:[.694,0,.708],120238:[.715,.022,.736],120239:[.694,0,.639],120240:[.715,.125,.736],120241:[.694,0,.646],120242:[.716,.022,.556],120243:[.688,0,.681],120244:[.694,.022,.688],120245:[.694,0,.667],120246:[.694,0,.944],120247:[.694,0,.667],120248:[.694,0,.667],120249:[.694,0,.611],120250:[.46,.01,.481],120251:[.694,.011,.517],120252:[.46,.01,.444],120253:[.694,.01,.517],120254:[.461,.01,.444],120255:[.705,0,.306],120256:[.455,.206,.5],120257:[.694,0,.517],120258:[.68,0,.239],120259:[.68,.205,.267],120260:[.694,0,.489],120261:[.694,0,.239],120262:[.455,0,.794],120263:[.455,0,.517],120264:[.46,.01,.5],120265:[.455,.194,.517],120266:[.455,.194,.517],120267:[.455,0,.342],120268:[.46,.01,.383],120269:[.571,.01,.361],120270:[.444,.01,.517],120271:[.444,0,.461],120272:[.444,0,.683],120273:[.444,0,.461],120274:[.444,.204,.461],120275:[.444,0,.435],120276:[.694,0,.733],120277:[.694,0,.733],120278:[.704,.011,.703],120279:[.694,0,.794],120280:[.691,0,.642],120281:[.691,0,.611],120282:[.705,.011,.733],120283:[.694,0,.794],120284:[.694,0,.331],120285:[.694,.022,.519],120286:[.694,0,.764],120287:[.694,0,.581],120288:[.694,0,.978],120289:[.694,0,.794],120290:[.716,.022,.794],120291:[.694,0,.703],120292:[.716,.106,.794],120293:[.694,0,.703],120294:[.716,.022,.611],120295:[.688,0,.733],120296:[.694,.022,.764],120297:[.694,0,.733],120298:[.694,0,1.039],120299:[.694,0,.733],120300:[.694,0,.733],120301:[.694,0,.672],120302:[.475,.011,.525],120303:[.694,.01,.561],120304:[.475,.011,.489],120305:[.694,.011,.561],120306:[.474,.01,.511],120307:[.705,0,.336],120308:[.469,.206,.55],120309:[.694,0,.561],120310:[.695,0,.256],120311:[.695,.205,.286],120312:[.694,0,.531],120313:[.694,0,.256],120314:[.469,0,.867],120315:[.468,0,.561],120316:[.474,.011,.55],120317:[.469,.194,.561],120318:[.469,.194,.561],120319:[.469,0,
.372],120320:[.474,.01,.422],120321:[.589,.01,.404],120322:[.458,.011,.561],120323:[.458,0,.5],120324:[.458,0,.744],120325:[.458,0,.5],120326:[.458,.205,.5],120327:[.458,0,.476],120328:[.694,0,.667],120329:[.694,0,.667],120330:[.705,.01,.639,{ic:.08}],120331:[.694,0,.722],120332:[.691,0,.597,{ic:.091}],120333:[.691,0,.569,{ic:.104}],120334:[.705,.011,.667,{ic:.063}],120335:[.694,0,.708,{ic:.06}],120336:[.694,0,.278,{ic:.06}],120337:[.694,.022,.472,{ic:.063}],120338:[.694,0,.694,{ic:.091}],120339:[.694,0,.542],120340:[.694,0,.875,{ic:.054}],120341:[.694,0,.708,{ic:.058}],120342:[.716,.022,.736],120343:[.694,0,.639,{ic:.051}],120344:[.716,.125,.736],120345:[.694,0,.646,{ic:.052}],120346:[.716,.022,.556,{ic:.053}],120347:[.688,0,.681,{ic:.109}],120348:[.694,.022,.688,{ic:.059}],120349:[.694,0,.667,{ic:.132}],120350:[.694,0,.944,{ic:.132}],120351:[.694,0,.667,{ic:.091}],120352:[.694,0,.667,{ic:.143}],120353:[.694,0,.611,{ic:.091}],120354:[.461,.01,.481],120355:[.694,.011,.517],120356:[.46,.011,.444,{ic:.055}],120357:[.694,.01,.517,{ic:.071}],120358:[.46,.011,.444],120359:[.705,0,.306,{ic:.188}],120360:[.455,.206,.5,{ic:.068}],120361:[.694,0,.517],120362:[.68,0,.239,{ic:.076}],120363:[.68,.204,.267,{ic:.069}],120364:[.694,0,.489,{ic:.054}],120365:[.694,0,.239,{ic:.072}],120366:[.455,0,.794],120367:[.454,0,.517],120368:[.461,.011,.5],120369:[.455,.194,.517],120370:[.455,.194,.517],120371:[.455,0,.342,{ic:.082}],120372:[.461,.011,.383,{ic:.053}],120373:[.571,.011,.361],120374:[.444,.01,.517],120375:[.444,0,.461,{ic:.079}],120376:[.444,0,.683,{ic:.079}],120377:[.444,0,.461,{ic:.076}],120378:[.444,.205,.461,{ic:.079}],120379:[.444,0,.435,{ic:.059}],120380:[.694,0,.667],120381:[.694,0,.667],120382:[.705,.01,.639,{ic:.08}],120383:[.694,0,.722],120384:[.691,0,.597,{ic:.091}],120385:[.691,0,.569,{ic:.104}],120386:[.705,.011,.667,{ic:.063}],120387:[.694,0,.708,{ic:.06}],120388:[.694,0,.278,{ic:.06}],120389:[.694,.022,.472,{ic:.063}],120390:[.694,0,.694,{ic:.091}],120391:[.694,0,.542],120392:[.694,0,.875,{ic:.054}],120393:[.694,0,.708,{ic:.058}],120394:[.716,.022,.736],120395:[.694,0,.639,{ic:.051}],120396:[.716,.125,.736],120397:[.694,0,.646,{ic:.052}],120398:[.716,.022,.556,{ic:.053}],120399:[.688,0,.681,{ic:.109}],120400:[.694,.022,.688,{ic:.059}],120401:[.694,0,.667,{ic:.132}],120402:[.694,0,.944,{ic:.132}],120403:[.694,0,.667,{ic:.091}],120404:[.694,0,.667,{ic:.143}],120405:[.694,0,.611,{ic:.091}],120406:[.461,.01,.481],120407:[.694,.011,.517],120408:[.46,.011,.444,{ic:.055}],120409:[.694,.01,.517,{ic:.071}],120410:[.46,.011,.444],120411:[.705,0,.306,{ic:.188}],120412:[.455,.206,.5,{ic:.068}],120413:[.694,0,.517],120414:[.68,0,.239,{ic:.076}],120415:[.68,.204,.267,{ic:.069}],120416:[.694,0,.489,{ic:.054}],120417:[.694,0,.239,{ic:.072}],120418:[.455,0,.794],120419:[.454,0,.517],120420:[.461,.011,.5],120421:[.455,.194,.517],120422:[.455,.194,.517],120423:[.455,0,.342,{ic:.082}],120424:[.461,.011,.383,{ic:.053}],120425:[.571,.011,.361],120426:[.444,.01,.517],120427:[.444,0,.461,{ic:.079}],120428:[.444,0,.683,{ic:.079}],120429:[.444,0,.461,{ic:.076}],120430:[.444,.205,.461,{ic:.079}],120431:[.444,0,.435,{ic:.059}],120432:[.623,0,.525],120433:[.611,0,.525],120434:[.622,.011,.525],120435:[.611,0,.525],120436:[.611,0,.525],120437:[.611,0,.525],120438:[.622,.011,.525],120439:[.611,0,.525],120440:[.611,0,.525],120441:[.611,.011,.525],120442:[.611,0,.525],120443:[.611,0,.525],120444:[.611,0,.525],120445:[.611,0,.525],120446:[.621,.01,.525],120447:[.611,0,.525],120448:[.621,.138,.525],120449:[.611,.011,.525],12
0450:[.622,.011,.525],120451:[.611,0,.525],120452:[.611,.011,.525],120453:[.611,.007,.525],120454:[.611,.007,.525],120455:[.611,0,.525],120456:[.611,0,.525],120457:[.611,0,.525],120458:[.439,.006,.525],120459:[.611,.006,.525],120460:[.44,.006,.525],120461:[.611,.006,.525],120462:[.44,.006,.525],120463:[.617,0,.525],120464:[.442,.229,.525],120465:[.611,0,.525],120466:[.612,0,.525],120467:[.612,.228,.525],120468:[.611,0,.525],120469:[.611,0,.525],120470:[.436,0,.525],120471:[.436,0,.525],120472:[.44,.006,.525],120473:[.437,.221,.525],120474:[.437,.221,.525],120475:[.437,0,.525],120476:[.44,.006,.525],120477:[.554,.006,.525],120478:[.431,.005,.525],120479:[.431,0,.525],120480:[.431,0,.525],120481:[.431,0,.525],120482:[.431,.228,.525],120483:[.431,0,.525],120484:[.441,.01,.307],120485:[.442,.204,.332],120488:[.698,0,.869],120489:[.686,0,.818],120490:[.68,0,.692],120491:[.698,0,.958],120492:[.68,0,.756],120493:[.686,0,.703],120494:[.686,0,.9],120495:[.696,.01,.894],120496:[.686,0,.436],120497:[.686,0,.901],120498:[.698,0,.806],120499:[.686,0,1.092],120500:[.686,0,.9],120501:[.675,0,.767],120502:[.696,.01,.864],120503:[.68,0,.9],120504:[.686,0,.786],120505:[.696,.01,.894],120506:[.686,0,.831],120507:[.675,0,.8],120508:[.697,0,.894],120509:[.686,0,.831],120510:[.686,0,.869],120511:[.686,0,.894],120512:[.696,0,.831],120513:[.686,.024,.958],120514:[.452,.008,.761,{sk:.0319}],120515:[.701,.194,.66,{sk:.0958}],120516:[.451,.211,.59],120517:[.725,.008,.522,{sk:.0639}],120518:[.461,.017,.529,{sk:.0958}],120519:[.711,.202,.508,{sk:.0958}],120520:[.452,.211,.6,{sk:.0639}],120521:[.702,.008,.562,{sk:.0958}],120522:[.452,.008,.412,{sk:.0639}],120523:[.452,.008,.668],120524:[.694,.013,.671],120525:[.452,.211,.708,{sk:.0319}],120526:[.452,0,.577,{sk:.0319}],120527:[.711,.201,.508,{sk:.128}],120528:[.452,.008,.585,{sk:.0639}],120529:[.444,.008,.682],120530:[.451,.211,.612,{sk:.0958}],120531:[.451,.105,.424,{sk:.0958}],120532:[.444,.008,.686],120533:[.444,.013,.521,{ic:.089,sk:.0319}],120534:[.453,.008,.631,{sk:.0319}],120535:[.452,.216,.747,{sk:.0958}],120536:[.452,.201,.718,{sk:.0639}],120537:[.694,.202,.758,{sk:.128}],120538:[.453,.008,.718],120539:[.71,.017,.628,{sk:.0958}],120540:[.444,.007,.483,{sk:.0639}],120541:[.701,.008,.692,{sk:.0958}],120542:[.434,.006,.667,{ic:.067}],120543:[.694,.202,.712,{sk:.0958}],120544:[.451,.194,.612,{sk:.0958}],120545:[.444,.008,.975],120546:[.716,0,.75,{sk:.139}],120547:[.683,0,.759,{sk:.0833}],120548:[.68,0,.615,{ic:.106,sk:.0833}],120549:[.716,0,.833,{sk:.167}],120550:[.68,0,.738,{sk:.0833}],120551:[.683,0,.683,{sk:.0833}],120552:[.683,0,.831,{ic:.057,sk:.0556}],120553:[.704,.022,.763,{sk:.0833}],120554:[.683,0,.44,{ic:.064,sk:.111}],120555:[.683,0,.849,{sk:.0556}],120556:[.716,0,.694,{sk:.167}],120557:[.683,0,.97,{ic:.081,sk:.0833}],120558:[.683,0,.803,{ic:.085,sk:.0833}],120559:[.677,0,.742,{sk:.0833}],120560:[.704,.022,.763,{sk:.0833}],120561:[.68,0,.831,{ic:.056,sk:.0556}],120562:[.683,0,.642,{ic:.109,sk:.0833}],120563:[.704,.022,.763,{sk:.0833}],120564:[.683,0,.78,{sk:.0833}],120565:[.677,0,.584,{ic:.12,sk:.0833}],120566:[.705,0,.583,{ic:.117,sk:.0556}],120567:[.683,0,.667,{sk:.0833}],120568:[.683,0,.828,{sk:.0833}],120569:[.683,0,.612,{ic:.08,sk:.0556}],120570:[.704,0,.772,{sk:.0833}],120571:[.683,.033,.833],120572:[.442,.011,.64,{sk:.0278}],120573:[.705,.194,.566,{sk:.0833}],120574:[.441,.216,.518],120575:[.717,.01,.444,{sk:.0556}],120576:[.452,.022,.466,{sk:.0833}],120577:[.704,.204,.438,{sk:.0833}],120578:[.442,.216,.497,{sk:.0556}],120579:[.705,.01,.469,{sk:.08
33}],120580:[.442,.01,.354,{sk:.0556}],120581:[.442,.011,.576],120582:[.694,.012,.583],120583:[.442,.216,.603,{sk:.0278}],120584:[.442,0,.494,{sk:.0278}],120585:[.704,.205,.438,{sk:.111}],120586:[.441,.011,.485,{sk:.0556}],120587:[.431,.011,.57],120588:[.442,.216,.517,{sk:.0833}],120589:[.442,.107,.363,{sk:.0833}],120590:[.431,.011,.571],120591:[.431,.013,.437,{ic:.08,sk:.0278}],120592:[.443,.01,.54,{sk:.0278}],120593:[.442,.218,.654,{sk:.0833}],120594:[.442,.204,.626,{sk:.0556}],120595:[.694,.205,.651,{sk:.111}],120596:[.443,.011,.622],120597:[.715,.022,.531,{sk:.0833}],120598:[.431,.011,.406,{sk:.0556}],120599:[.705,.011,.591,{sk:.0833}],120600:[.434,.006,.667,{ic:.067}],120601:[.694,.205,.596,{sk:.0833}],120602:[.442,.194,.517,{sk:.0833}],120603:[.431,.01,.828],120604:[.711,0,.869,{sk:.16}],120605:[.686,0,.866,{sk:.0958}],120606:[.68,0,.657,{ic:.12,sk:.0958}],120607:[.711,0,.958,{sk:.192}],120608:[.68,0,.81,{sk:.0958}],120609:[.686,0,.773,{sk:.0958}],120610:[.686,0,.982,{sk:.0639}],120611:[.702,.017,.867,{sk:.0958}],120612:[.686,0,.511,{ic:.062,sk:.128}],120613:[.686,0,.971,{sk:.0639}],120614:[.711,0,.806,{sk:.192}],120615:[.686,0,1.142,{ic:.077,sk:.0958}],120616:[.686,0,.95,{ic:.077,sk:.0958}],120617:[.675,0,.841,{sk:.0958}],120618:[.703,.017,.837,{sk:.0958}],120619:[.68,0,.982,{sk:.0639}],120620:[.686,0,.723,{ic:.124,sk:.0958}],120621:[.702,.017,.867,{sk:.0958}],120622:[.686,0,.885,{sk:.0958}],120623:[.675,0,.637,{ic:.135,sk:.0958}],120624:[.703,0,.671,{ic:.131,sk:.0639}],120625:[.686,0,.767,{sk:.0958}],120626:[.686,0,.947,{sk:.0958}],120627:[.686,0,.714,{ic:.076,sk:.0639}],120628:[.703,0,.879,{sk:.0958}],120629:[.683,.033,.833],120630:[.452,.008,.761,{sk:.0319}],120631:[.701,.194,.66,{sk:.0958}],120632:[.451,.211,.59],120633:[.725,.008,.522,{sk:.0639}],120634:[.461,.017,.529,{sk:.0958}],120635:[.711,.202,.508,{sk:.0958}],120636:[.452,.211,.6,{sk:.0639}],120637:[.702,.008,.562,{sk:.0958}],120638:[.452,.008,.412,{sk:.0639}],120639:[.452,.008,.668],120640:[.694,.013,.671],120641:[.452,.211,.708,{sk:.0319}],120642:[.452,0,.577,{sk:.0319}],120643:[.711,.201,.508,{sk:.128}],120644:[.452,.008,.585,{sk:.0639}],120645:[.444,.008,.682],120646:[.451,.211,.612,{sk:.0958}],120647:[.451,.105,.424,{sk:.0958}],120648:[.444,.008,.686],120649:[.444,.013,.521,{ic:.089,sk:.0319}],120650:[.453,.008,.631,{sk:.0319}],120651:[.452,.216,.747,{sk:.0958}],120652:[.452,.201,.718,{sk:.0639}],120653:[.694,.202,.758,{sk:.128}],120654:[.453,.008,.718],120655:[.715,.022,.531,{sk:.0833}],120656:[.444,.007,.483,{sk:.0639}],120657:[.701,.008,.692,{sk:.0958}],120658:[.434,.006,.667,{ic:.067}],120659:[.694,.202,.712,{sk:.0958}],120660:[.451,.194,.612,{sk:.0958}],120661:[.444,.008,.975],120662:[.694,0,.733],120663:[.694,0,.733],120664:[.691,0,.581],120665:[.694,0,.917],120666:[.691,0,.642],120667:[.694,0,.672],120668:[.694,0,.794],120669:[.716,.022,.856],120670:[.694,0,.331],120671:[.694,0,.764],120672:[.694,0,.672],120673:[.694,0,.978],120674:[.694,0,.794],120675:[.688,0,.733],120676:[.716,.022,.794],120677:[.691,0,.794],120678:[.694,0,.703],120679:[.716,.022,.856],120680:[.694,0,.794],120681:[.688,0,.733],120682:[.715,0,.856],120683:[.694,0,.794],120684:[.694,0,.733],120685:[.694,0,.856],120686:[.716,0,.794],120687:[.683,.033,.833],120688:[.452,.008,.761,{sk:.0319}],120689:[.701,.194,.66,{sk:.0958}],120690:[.451,.211,.59],120691:[.725,.008,.522,{sk:.0639}],120692:[.461,.017,.529,{sk:.0958}],120693:[.711,.202,.508,{sk:.0958}],120694:[.452,.211,.6,{sk:.0639}],120695:[.702,.008,.562,{sk:.0958}],120696:[.452,.008,.412,{sk:.0
639}],120697:[.452,.008,.668],120698:[.694,.013,.671],120699:[.452,.211,.708,{sk:.0319}],120700:[.452,0,.577,{sk:.0319}],120701:[.711,.201,.508,{sk:.128}],120702:[.452,.008,.585,{sk:.0639}],120703:[.444,.008,.682],120704:[.451,.211,.612,{sk:.0958}],120705:[.451,.105,.424,{sk:.0958}],120706:[.444,.008,.686],120707:[.444,.013,.521,{ic:.089,sk:.0319}],120708:[.453,.008,.631,{sk:.0319}],120709:[.452,.216,.747,{sk:.0958}],120710:[.452,.201,.718,{sk:.0639}],120711:[.694,.202,.758,{sk:.128}],120712:[.453,.008,.718],120713:[.715,.022,.531,{sk:.0833}],120714:[.444,.007,.483,{sk:.0639}],120715:[.701,.008,.692,{sk:.0958}],120716:[.434,.006,.667,{ic:.067}],120717:[.694,.202,.712,{sk:.0958}],120718:[.451,.194,.612,{sk:.0958}],120719:[.444,.008,.975],120720:[.694,0,.667],120721:[.694,0,.667],120722:[.691,0,.542,{ic:.104}],120723:[.694,0,.833],120724:[.691,0,.597,{ic:.091}],120725:[.694,0,.611,{ic:.091}],120726:[.694,0,.708,{ic:.06}],120727:[.715,.022,.778],120728:[.694,0,.278,{ic:.06}],120729:[.694,0,.694,{ic:.091}],120730:[.694,0,.611],120731:[.694,0,.875,{ic:.054}],120732:[.694,0,.708,{ic:.058}],120733:[.688,0,.667,{ic:.098}],120734:[.716,.022,.736],120735:[.691,0,.708,{ic:.06}],120736:[.694,0,.639,{ic:.051}],120737:[.715,.022,.778],120738:[.694,0,.722,{ic:.091}],120739:[.688,0,.681,{ic:.109}],120740:[.716,0,.778,{ic:.065}],120741:[.694,0,.722],120742:[.694,0,.667,{ic:.091}],120743:[.694,0,.778,{ic:.076}],120744:[.716,0,.722],120745:[.683,.033,.833],120746:[.452,.008,.761,{sk:.0319}],120747:[.701,.194,.66,{sk:.0958}],120748:[.451,.211,.59],120749:[.725,.008,.522,{sk:.0639}],120750:[.461,.017,.529,{sk:.0958}],120751:[.711,.202,.508,{sk:.0958}],120752:[.452,.211,.6,{sk:.0639}],120753:[.702,.008,.562,{sk:.0958}],120754:[.452,.008,.412,{sk:.0639}],120755:[.452,.008,.668],120756:[.694,.013,.671],120757:[.452,.211,.708,{sk:.0319}],120758:[.452,0,.577,{sk:.0319}],120759:[.711,.201,.508,{sk:.128}],120760:[.452,.008,.585,{sk:.0639}],120761:[.444,.008,.682],120762:[.451,.211,.612,{sk:.0958}],120763:[.451,.105,.424,{sk:.0958}],120764:[.444,.008,.686],120765:[.444,.013,.521,{ic:.089,sk:.0319}],120766:[.453,.008,.631,{sk:.0319}],120767:[.452,.216,.747,{sk:.0958}],120768:[.452,.201,.718,{sk:.0639}],120769:[.694,.202,.758,{sk:.128}],120770:[.453,.008,.718],120771:[.715,.022,.531,{sk:.0833}],120772:[.444,.007,.483,{sk:.0639}],120773:[.701,.008,.692,{sk:.0958}],120774:[.434,.006,.667,{ic:.067}],120775:[.694,.202,.712,{sk:.0958}],120776:[.451,.194,.612,{sk:.0958}],120777:[.444,.008,.975],120778:[.68,0,.643,{ic:.106,sk:.0833}],120779:[.605,.085,.778],120782:[.654,.01,.575],120783:[.655,0,.575],120784:[.654,0,.575],120785:[.655,.011,.575],120786:[.656,0,.575],120787:[.655,.011,.575],120788:[.655,.011,.575],120789:[.676,.011,.575],120790:[.654,.011,.575],120791:[.654,.011,.575],120792:[.654,.01,.575],120793:[.655,0,.575],120794:[.654,0,.575],120795:[.655,.011,.575],120796:[.656,0,.575],120797:[.655,.011,.575],120798:[.655,.011,.575],120799:[.676,.011,.575],120800:[.654,.011,.575],120801:[.654,.011,.575],120802:[.678,.022,.5],120803:[.678,0,.5],120804:[.677,0,.5],120805:[.678,.022,.5],120806:[.656,0,.5],120807:[.656,.021,.5],120808:[.677,.022,.5],120809:[.656,.011,.5],120810:[.678,.022,.5],120811:[.677,.022,.5],120812:[.715,.022,.55],120813:[.716,0,.55],120814:[.716,0,.55],120815:[.716,.022,.55],120816:[.694,0,.55],120817:[.694,.022,.55],120818:[.716,.022,.55],120819:[.695,.011,.55],120820:[.715,.022,.55],120821:[.716,.022,.55],120822:[.621,.01,.525],120823:[.622,0,.525],120824:[.622,0,.525],120825:[.622,.011,.525],120826:[
.624,0,.525],120827:[.611,.01,.525],120828:[.622,.011,.525],120829:[.627,.01,.525],120830:[.621,.01,.525],120831:[.622,.011,.525]}},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(1),i=r(195);e.sansSerifBoldItalic=n.AddCSS(i.sansSerifBoldItalic,{32:{c:" "},33:{c:"!"},35:{c:"#"},36:{c:"$"},37:{c:"%"},38:{c:"&"},40:{c:"("},41:{c:")"},42:{c:"*"},43:{c:"+"},44:{c:","},45:{c:"-"},46:{c:"."},47:{c:"/"},48:{c:"0"},49:{c:"1"},50:{c:"2"},51:{c:"3"},52:{c:"4"},53:{c:"5"},54:{c:"6"},55:{c:"7"},56:{c:"8"},57:{c:"9"},58:{c:":"},59:{c:";"},61:{c:"="},63:{c:"?"},64:{c:"@"},65:{c:"A"},66:{c:"B"},67:{c:"C"},68:{c:"D"},69:{c:"E"},70:{c:"F"},71:{c:"G"},72:{c:"H"},73:{c:"I"},74:{c:"J"},75:{c:"K"},76:{c:"L"},77:{c:"M"},78:{c:"N"},79:{c:"O"},80:{c:"P"},81:{c:"Q"},82:{c:"R"},83:{c:"S"},84:{c:"T"},85:{c:"U"},86:{c:"V"},87:{c:"W"},88:{c:"X"},89:{c:"Y"},90:{c:"Z"},91:{c:"["},93:{c:"]"},94:{c:"^"},95:{c:"_"},97:{c:"a"},98:{c:"b"},99:{c:"c"},100:{c:"d"},101:{c:"e"},102:{c:"f"},103:{c:"g"},104:{c:"h"},105:{c:"i"},106:{c:"j"},107:{c:"k"},108:{c:"l"},109:{c:"m"},110:{c:"n"},111:{c:"o"},112:{c:"p"},113:{c:"q"},114:{c:"r"},115:{c:"s"},116:{c:"t"},117:{c:"u"},118:{c:"v"},119:{c:"w"},120:{c:"x"},121:{c:"y"},122:{c:"z"},126:{c:"~"},913:{c:"A"},914:{c:"B"},917:{c:"E"},918:{c:"Z"},919:{c:"H"},921:{c:"I"},922:{c:"K"},924:{c:"M"},925:{c:"N"},927:{c:"O"},929:{c:"P"},930:{c:"\\398"},932:{c:"T"},935:{c:"X"},978:{c:"\\3A5"},988:{c:"F"},8213:{c:"\\2014"},8215:{c:"_"},8260:{c:"/"},8710:{c:"\\394"}})},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.sansSerifBoldItalic={32:[0,0,.25],33:[.694,0,.319],34:[.694,-.471,.5],35:[.694,.194,.833],36:[.75,.056,.5,{ic:.065}],37:[.75,.056,.833],38:[.716,.022,.758],39:[.694,-.471,.278,{ic:.057}],40:[.75,.25,.389,{ic:.102}],41:[.75,.25,.389],42:[.75,-.306,.5,{ic:.068}],43:[.583,.083,.778],44:[.098,.125,.278],45:[.259,-.186,.333],46:[.098,0,.278],47:[.75,.25,.5,{ic:.1}],48:[.678,.022,.5],49:[.678,0,.5],50:[.678,0,.5,{ic:.051}],51:[.678,.022,.5],52:[.656,0,.5],53:[.656,.022,.5,{ic:.055}],54:[.678,.022,.5],55:[.656,.011,.5,{ic:.096}],56:[.678,.022,.5,{ic:.054}],57:[.677,.022,.5],58:[.444,0,.278],59:[.444,.125,.278],61:[.37,-.13,.778],63:[.704,0,.472,{ic:.064}],64:[.705,.01,.667],65:[.694,0,.667],66:[.694,0,.667],67:[.705,.01,.639,{ic:.08}],68:[.694,0,.722],69:[.691,0,.597,{ic:.091}],70:[.691,0,.569,{ic:.104}],71:[.705,.011,.667,{ic:.063}],72:[.694,0,.708,{ic:.06}],73:[.694,0,.278,{ic:.06}],74:[.694,.022,.472,{ic:.063}],75:[.694,0,.694,{ic:.091}],76:[.694,0,.542],77:[.694,0,.875,{ic:.054}],78:[.694,0,.708,{ic:.058}],79:[.716,.022,.736],80:[.694,0,.639,{ic:.051}],81:[.716,.125,.736],82:[.694,0,.646,{ic:.052}],83:[.716,.022,.556,{ic:.053}],84:[.688,0,.681,{ic:.109}],85:[.694,.022,.688,{ic:.059}],86:[.694,0,.667,{ic:.132}],87:[.694,0,.944,{ic:.132}],88:[.694,0,.667,{ic:.091}],89:[.694,0,.667,{ic:.143}],90:[.694,0,.611,{ic:.091}],91:[.75,.25,.289,{ic:.136}],93:[.75,.25,.289,{ic:.064}],94:[.694,-.527,.5],95:[-.038,.114,.5,{ic:.065}],97:[.461,.01,.481],98:[.694,.011,.517],99:[.46,.011,.444,{ic:.055}],100:[.694,.01,.517,{ic:.071}],101:[.46,.011,.444],102:[.705,0,.306,{ic:.188}],103:[.455,.206,.5,{ic:.068}],104:[.694,0,.517],105:[.68,0,.239,{ic:.076}],106:[.68,.204,.267,{ic:.069}],107:[.694,0,.489,{ic:.054}],108:[.694,0,.239,{ic:.072}],109:[.455,0,.794],110:[.454,0,.517],111:[.461,.011,.5],112:[.455,.194,.517],113:[.455,.194,.517],114:[.455,0,.342,{ic:.082}],115:[.461,.011,.383,{ic:.053}],116:[.571,.011,.361],117:[.444,.01,.517],118:[.444,0,.46
1,{ic:.079}],119:[.444,0,.683,{ic:.079}],120:[.444,0,.461,{ic:.076}],121:[.444,.205,.461,{ic:.079}],122:[.444,0,.435,{ic:.059}],126:[.327,-.193,.5,{ic:.06}],160:[0,0,.25],305:[.444,0,.239],567:[.444,.204,.267],768:[.694,-.527,0],769:[.694,-.527,0,{ic:.063}],770:[.694,-.527,0],771:[.677,-.543,0,{ic:.06}],772:[.631,-.552,0,{ic:.064}],774:[.694,-.508,0,{ic:.073}],775:[.68,-.576,0],776:[.68,-.582,0],778:[.693,-.527,0],779:[.694,-.527,0,{ic:.063}],780:[.654,-.487,0,{ic:.06}],913:[.694,0,.667],914:[.694,0,.667],915:[.691,0,.542,{ic:.104}],916:[.694,0,.833],917:[.691,0,.597,{ic:.091}],918:[.694,0,.611,{ic:.091}],919:[.694,0,.708,{ic:.06}],920:[.715,.022,.778],921:[.694,0,.278,{ic:.06}],922:[.694,0,.694,{ic:.091}],923:[.694,0,.611],924:[.694,0,.875,{ic:.054}],925:[.694,0,.708,{ic:.058}],926:[.688,0,.667,{ic:.098}],927:[.716,.022,.736],928:[.691,0,.708,{ic:.06}],929:[.694,0,.639,{ic:.051}],930:[.715,.022,.778],931:[.694,0,.722,{ic:.091}],932:[.688,0,.681,{ic:.109}],933:[.716,0,.778,{ic:.065}],934:[.694,0,.722],935:[.694,0,.667,{ic:.091}],936:[.694,0,.778,{ic:.076}],937:[.716,0,.722],978:[.716,0,.778,{ic:.065}],988:[.691,0,.569,{ic:.104}],8211:[.312,-.236,.5,{ic:.065}],8212:[.312,-.236,1,{ic:.065}],8213:[.312,-.236,1,{ic:.065}],8215:[-.038,.114,.5,{ic:.065}],8216:[.694,-.471,.278,{ic:.058}],8217:[.694,-.471,.278,{ic:.057}],8220:[.694,-.471,.5,{ic:.114}],8221:[.694,-.471,.5],8260:[.75,.25,.5,{ic:.1}],8710:[.694,0,.833]}},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(1),i=r(197);e.sansSerifBold=n.AddCSS(i.sansSerifBold,{32:{c:" "},33:{c:"!"},35:{c:"#"},36:{c:"$"},37:{c:"%"},38:{c:"&"},40:{c:"("},41:{c:")"},42:{c:"*"},43:{c:"+"},44:{c:","},45:{c:"-"},46:{c:"."},47:{c:"/"},48:{c:"0"},49:{c:"1"},50:{c:"2"},51:{c:"3"},52:{c:"4"},53:{c:"5"},54:{c:"6"},55:{c:"7"},56:{c:"8"},57:{c:"9"},58:{c:":"},59:{c:";"},61:{c:"="},63:{c:"?"},64:{c:"@"},65:{c:"A"},66:{c:"B"},67:{c:"C"},68:{c:"D"},69:{c:"E"},70:{c:"F"},71:{c:"G"},72:{c:"H"},73:{c:"I"},74:{c:"J"},75:{c:"K"},76:{c:"L"},77:{c:"M"},78:{c:"N"},79:{c:"O"},80:{c:"P"},81:{c:"Q"},82:{c:"R"},83:{c:"S"},84:{c:"T"},85:{c:"U"},86:{c:"V"},87:{c:"W"},88:{c:"X"},89:{c:"Y"},90:{c:"Z"},91:{c:"["},93:{c:"]"},94:{c:"^"},95:{c:"_"},97:{c:"a"},98:{c:"b"},99:{c:"c"},100:{c:"d"},101:{c:"e"},102:{c:"f"},103:{c:"g"},104:{c:"h"},105:{c:"i"},106:{c:"j"},107:{c:"k"},108:{c:"l"},109:{c:"m"},110:{c:"n"},111:{c:"o"},112:{c:"p"},113:{c:"q"},114:{c:"r"},115:{c:"s"},116:{c:"t"},117:{c:"u"},118:{c:"v"},119:{c:"w"},120:{c:"x"},121:{c:"y"},122:{c:"z"},126:{c:"~"},913:{c:"A"},914:{c:"B"},917:{c:"E"},918:{c:"Z"},919:{c:"H"},921:{c:"I"},922:{c:"K"},924:{c:"M"},925:{c:"N"},927:{c:"O"},929:{c:"P"},930:{c:"\\398"},932:{c:"T"},935:{c:"X"},978:{c:"\\3A5"},988:{c:"F"},8213:{c:"\\2014"},8215:{c:"_"},8260:{c:"/"},8710:{c:"\\394"}})},function(t,e,r){"use 
strict";Object.defineProperty(e,"__esModule",{value:!0}),e.sansSerifBold={32:[0,0,.25],33:[.694,0,.367],34:[.694,-.442,.558],35:[.694,.193,.917],36:[.75,.056,.55],37:[.75,.056,1.029],38:[.716,.022,.831],39:[.694,-.442,.306],40:[.75,.249,.428],41:[.75,.25,.428],42:[.75,-.293,.55],43:[.617,.116,.856],44:[.146,.106,.306],45:[.273,-.186,.367],46:[.146,0,.306],47:[.75,.249,.55],48:[.715,.022,.55],49:[.716,0,.55],50:[.716,0,.55],51:[.716,.022,.55],52:[.694,0,.55],53:[.694,.022,.55],54:[.716,.022,.55],55:[.695,.011,.55],56:[.715,.022,.55],57:[.716,.022,.55],58:[.458,0,.306],59:[.458,.106,.306],61:[.407,-.094,.856],63:[.705,0,.519],64:[.704,.011,.733],65:[.694,0,.733],66:[.694,0,.733],67:[.704,.011,.703],68:[.694,0,.794],69:[.691,0,.642],70:[.691,0,.611],71:[.705,.011,.733],72:[.694,0,.794],73:[.694,0,.331],74:[.694,.022,.519],75:[.694,0,.764],76:[.694,0,.581],77:[.694,0,.978],78:[.694,0,.794],79:[.716,.022,.794],80:[.694,0,.703],81:[.716,.106,.794],82:[.694,0,.703],83:[.716,.022,.611],84:[.688,0,.733],85:[.694,.022,.764],86:[.694,0,.733],87:[.694,0,1.039],88:[.694,0,.733],89:[.694,0,.733],90:[.694,0,.672],91:[.75,.25,.343],93:[.75,.25,.343],94:[.694,-.537,.55],95:[-.023,.11,.55],97:[.475,.011,.525],98:[.694,.01,.561],99:[.475,.011,.489],100:[.694,.011,.561],101:[.474,.01,.511],102:[.705,0,.336],103:[.469,.206,.55],104:[.694,0,.561],105:[.695,0,.256],106:[.695,.205,.286],107:[.694,0,.531],108:[.694,0,.256],109:[.469,0,.867],110:[.468,0,.561],111:[.474,.011,.55],112:[.469,.194,.561],113:[.469,.194,.561],114:[.469,0,.372],115:[.474,.01,.422],116:[.589,.01,.404],117:[.458,.011,.561],118:[.458,0,.5],119:[.458,0,.744],120:[.458,0,.5],121:[.458,.205,.5],122:[.458,0,.476],126:[.344,-.198,.55],160:[0,0,.25],305:[.458,0,.256],567:[.458,.205,.286],768:[.694,-.537,0],769:[.694,-.537,0],770:[.694,-.537,0],771:[.694,-.548,0],772:[.66,-.56,0],774:[.694,-.552,0],775:[.695,-.596,0],776:[.695,-.595,0],778:[.694,-.538,0],779:[.694,-.537,0],780:[.657,-.5,0],913:[.694,0,.733],914:[.694,0,.733],915:[.691,0,.581],916:[.694,0,.917],917:[.691,0,.642],918:[.694,0,.672],919:[.694,0,.794],920:[.716,.022,.856],921:[.694,0,.331],922:[.694,0,.764],923:[.694,0,.672],924:[.694,0,.978],925:[.694,0,.794],926:[.688,0,.733],927:[.716,.022,.794],928:[.691,0,.794],929:[.694,0,.703],930:[.716,.022,.856],931:[.694,0,.794],932:[.688,0,.733],933:[.715,0,.856],934:[.694,0,.794],935:[.694,0,.733],936:[.694,0,.856],937:[.716,0,.794],978:[.715,0,.856],988:[.691,0,.611],8211:[.327,-.24,.55],8212:[.327,-.24,1.1],8213:[.327,-.24,1.1],8215:[-.023,.11,.55],8216:[.694,-.443,.306],8217:[.694,-.442,.306],8220:[.694,-.443,.558],8221:[.694,-.442,.558],8260:[.75,.249,.55],8710:[.694,0,.917]}},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(1),i=r(199);e.sansSerifItalic=n.AddCSS(i.sansSerifItalic,{32:{c:" 
"},33:{c:"!"},35:{c:"#"},36:{c:"$"},37:{c:"%"},38:{c:"&"},40:{c:"("},41:{c:")"},42:{c:"*"},43:{c:"+"},44:{c:","},45:{c:"-"},46:{c:"."},47:{c:"/"},48:{c:"0"},49:{c:"1"},50:{c:"2"},51:{c:"3"},52:{c:"4"},53:{c:"5"},54:{c:"6"},55:{c:"7"},56:{c:"8"},57:{c:"9"},58:{c:":"},59:{c:";"},61:{c:"="},63:{c:"?"},64:{c:"@"},65:{c:"A"},66:{c:"B"},67:{c:"C"},68:{c:"D"},69:{c:"E"},70:{c:"F"},71:{c:"G"},72:{c:"H"},73:{c:"I"},74:{c:"J"},75:{c:"K"},76:{c:"L"},77:{c:"M"},78:{c:"N"},79:{c:"O"},80:{c:"P"},81:{c:"Q"},82:{c:"R"},83:{c:"S"},84:{c:"T"},85:{c:"U"},86:{c:"V"},87:{c:"W"},88:{c:"X"},89:{c:"Y"},90:{c:"Z"},91:{c:"["},93:{c:"]"},94:{c:"^"},95:{c:"_"},97:{c:"a"},98:{c:"b"},99:{c:"c"},100:{c:"d"},101:{c:"e"},102:{c:"f"},103:{c:"g"},104:{c:"h"},105:{c:"i"},106:{c:"j"},107:{c:"k"},108:{c:"l"},109:{c:"m"},110:{c:"n"},111:{c:"o"},112:{c:"p"},113:{c:"q"},114:{c:"r"},115:{c:"s"},116:{c:"t"},117:{c:"u"},118:{c:"v"},119:{c:"w"},120:{c:"x"},121:{c:"y"},122:{c:"z"},126:{c:"~"},913:{c:"A"},914:{c:"B"},917:{c:"E"},918:{c:"Z"},919:{c:"H"},921:{c:"I"},922:{c:"K"},924:{c:"M"},925:{c:"N"},927:{c:"O"},929:{c:"P"},930:{c:"\\398"},932:{c:"T"},935:{c:"X"},978:{c:"\\3A5"},988:{c:"F"},8213:{c:"\\2014"},8215:{c:"_"},8260:{c:"/"},8710:{c:"\\394"}})},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.sansSerifItalic={32:[0,0,.25],33:[.694,0,.319],34:[.694,-.471,.5],35:[.694,.194,.833],36:[.75,.056,.5,{ic:.065}],37:[.75,.056,.833],38:[.716,.022,.758],39:[.694,-.471,.278,{ic:.057}],40:[.75,.25,.389,{ic:.102}],41:[.75,.25,.389],42:[.75,-.306,.5,{ic:.068}],43:[.583,.083,.778],44:[.098,.125,.278],45:[.259,-.186,.333],46:[.098,0,.278],47:[.75,.25,.5,{ic:.1}],48:[.678,.022,.5],49:[.678,0,.5],50:[.678,0,.5,{ic:.051}],51:[.678,.022,.5],52:[.656,0,.5],53:[.656,.022,.5,{ic:.055}],54:[.678,.022,.5],55:[.656,.011,.5,{ic:.096}],56:[.678,.022,.5,{ic:.054}],57:[.677,.022,.5],58:[.444,0,.278],59:[.444,.125,.278],61:[.37,-.13,.778],63:[.704,0,.472,{ic:.064}],64:[.705,.01,.667],65:[.694,0,.667],66:[.694,0,.667],67:[.705,.01,.639,{ic:.08}],68:[.694,0,.722],69:[.691,0,.597,{ic:.091}],70:[.691,0,.569,{ic:.104}],71:[.705,.011,.667,{ic:.063}],72:[.694,0,.708,{ic:.06}],73:[.694,0,.278,{ic:.06}],74:[.694,.022,.472,{ic:.063}],75:[.694,0,.694,{ic:.091}],76:[.694,0,.542],77:[.694,0,.875,{ic:.054}],78:[.694,0,.708,{ic:.058}],79:[.716,.022,.736],80:[.694,0,.639,{ic:.051}],81:[.716,.125,.736],82:[.694,0,.646,{ic:.052}],83:[.716,.022,.556,{ic:.053}],84:[.688,0,.681,{ic:.109}],85:[.694,.022,.688,{ic:.059}],86:[.694,0,.667,{ic:.132}],87:[.694,0,.944,{ic:.132}],88:[.694,0,.667,{ic:.091}],89:[.694,0,.667,{ic:.143}],90:[.694,0,.611,{ic:.091}],91:[.75,.25,.289,{ic:.136}],93:[.75,.25,.289,{ic:.064}],94:[.694,-.527,.5],95:[-.038,.114,.5,{ic:.065}],97:[.461,.01,.481],98:[.694,.011,.517],99:[.46,.011,.444,{ic:.055}],100:[.694,.01,.517,{ic:.071}],101:[.46,.011,.444],102:[.705,0,.306,{ic:.188}],103:[.455,.206,.5,{ic:.068}],104:[.694,0,.517],105:[.68,0,.239,{ic:.076}],106:[.68,.204,.267,{ic:.069}],107:[.694,0,.489,{ic:.054}],108:[.694,0,.239,{ic:.072}],109:[.455,0,.794],110:[.454,0,.517],111:[.461,.011,.5],112:[.455,.194,.517],113:[.455,.194,.517],114:[.455,0,.342,{ic:.082}],115:[.461,.011,.383,{ic:.053}],116:[.571,.011,.361],117:[.444,.01,.517],118:[.444,0,.461,{ic:.079}],119:[.444,0,.683,{ic:.079}],120:[.444,0,.461,{ic:.076}],121:[.444,.205,.461,{ic:.079}],122:[.444,0,.435,{ic:.059}],126:[.327,-.193,.5,{ic:.06}],160:[0,0,.25],305:[.444,0,.239],567:[.444,.204,.267],768:[.694,-.527,0],769:[.694,-.527,0,{ic:.063}],770:[.694,-.527,0],771:[.677,-.543,0,{
ic:.06}],772:[.631,-.552,0,{ic:.064}],774:[.694,-.508,0,{ic:.073}],775:[.68,-.576,0],776:[.68,-.582,0],778:[.693,-.527,0],779:[.694,-.527,0,{ic:.063}],780:[.654,-.487,0,{ic:.06}],913:[.694,0,.667],914:[.694,0,.667],915:[.691,0,.542,{ic:.104}],916:[.694,0,.833],917:[.691,0,.597,{ic:.091}],918:[.694,0,.611,{ic:.091}],919:[.694,0,.708,{ic:.06}],920:[.715,.022,.778],921:[.694,0,.278,{ic:.06}],922:[.694,0,.694,{ic:.091}],923:[.694,0,.611],924:[.694,0,.875,{ic:.054}],925:[.694,0,.708,{ic:.058}],926:[.688,0,.667,{ic:.098}],927:[.716,.022,.736],928:[.691,0,.708,{ic:.06}],929:[.694,0,.639,{ic:.051}],930:[.715,.022,.778],931:[.694,0,.722,{ic:.091}],932:[.688,0,.681,{ic:.109}],933:[.716,0,.778,{ic:.065}],934:[.694,0,.722],935:[.694,0,.667,{ic:.091}],936:[.694,0,.778,{ic:.076}],937:[.716,0,.722],978:[.716,0,.778,{ic:.065}],988:[.691,0,.569,{ic:.104}],8211:[.312,-.236,.5,{ic:.065}],8212:[.312,-.236,1,{ic:.065}],8213:[.312,-.236,1,{ic:.065}],8215:[-.038,.114,.5,{ic:.065}],8216:[.694,-.471,.278,{ic:.058}],8217:[.694,-.471,.278,{ic:.057}],8220:[.694,-.471,.5,{ic:.114}],8221:[.694,-.471,.5],8260:[.75,.25,.5,{ic:.1}],8710:[.694,0,.833]}},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(1),i=r(201);e.sansSerif=n.AddCSS(i.sansSerif,{32:{c:" "},33:{c:"!"},35:{c:"#"},36:{c:"$"},37:{c:"%"},38:{c:"&"},40:{c:"("},41:{c:")"},42:{c:"*"},43:{c:"+"},44:{c:","},45:{c:"-"},46:{c:"."},47:{c:"/"},48:{c:"0"},49:{c:"1"},50:{c:"2"},51:{c:"3"},52:{c:"4"},53:{c:"5"},54:{c:"6"},55:{c:"7"},56:{c:"8"},57:{c:"9"},58:{c:":"},59:{c:";"},61:{c:"="},63:{c:"?"},64:{c:"@"},65:{c:"A"},66:{c:"B"},67:{c:"C"},68:{c:"D"},69:{c:"E"},70:{c:"F"},71:{c:"G"},72:{c:"H"},73:{c:"I"},74:{c:"J"},75:{c:"K"},76:{c:"L"},77:{c:"M"},78:{c:"N"},79:{c:"O"},80:{c:"P"},81:{c:"Q"},82:{c:"R"},83:{c:"S"},84:{c:"T"},85:{c:"U"},86:{c:"V"},87:{c:"W"},88:{c:"X"},89:{c:"Y"},90:{c:"Z"},91:{c:"["},93:{c:"]"},94:{c:"^"},95:{c:"_"},97:{c:"a"},98:{c:"b"},99:{c:"c"},100:{c:"d"},101:{c:"e"},102:{c:"f"},103:{c:"g"},104:{c:"h"},105:{c:"i"},106:{c:"j"},107:{c:"k"},108:{c:"l"},109:{c:"m"},110:{c:"n"},111:{c:"o"},112:{c:"p"},113:{c:"q"},114:{c:"r"},115:{c:"s"},116:{c:"t"},117:{c:"u"},118:{c:"v"},119:{c:"w"},120:{c:"x"},121:{c:"y"},122:{c:"z"},126:{c:"~"},913:{c:"A"},914:{c:"B"},917:{c:"E"},918:{c:"Z"},919:{c:"H"},921:{c:"I"},922:{c:"K"},924:{c:"M"},925:{c:"N"},927:{c:"O"},929:{c:"P"},930:{c:"\\398"},932:{c:"T"},935:{c:"X"},978:{c:"\\3A5"},988:{c:"F"},8213:{c:"\\2014"},8215:{c:"_"},8260:{c:"/"},8710:{c:"\\394"}})},function(t,e,r){"use 
strict";Object.defineProperty(e,"__esModule",{value:!0}),e.sansSerif={32:[0,0,.25],33:[.694,0,.319],34:[.694,-.471,.5],35:[.694,.194,.833],36:[.75,.056,.5],37:[.75,.056,.833],38:[.716,.022,.758],39:[.694,-.471,.278],40:[.75,.25,.389],41:[.75,.25,.389],42:[.75,-.306,.5],43:[.583,.082,.778],44:[.098,.125,.278],45:[.259,-.186,.333],46:[.098,0,.278],47:[.75,.25,.5],48:[.678,.022,.5],49:[.678,0,.5],50:[.677,0,.5],51:[.678,.022,.5],52:[.656,0,.5],53:[.656,.021,.5],54:[.677,.022,.5],55:[.656,.011,.5],56:[.678,.022,.5],57:[.677,.022,.5],58:[.444,0,.278],59:[.444,.125,.278],61:[.37,-.13,.778],63:[.704,0,.472],64:[.704,.011,.667],65:[.694,0,.667],66:[.694,0,.667],67:[.705,.011,.639],68:[.694,0,.722],69:[.691,0,.597],70:[.691,0,.569],71:[.704,.011,.667],72:[.694,0,.708],73:[.694,0,.278],74:[.694,.022,.472],75:[.694,0,.694],76:[.694,0,.542],77:[.694,0,.875],78:[.694,0,.708],79:[.715,.022,.736],80:[.694,0,.639],81:[.715,.125,.736],82:[.694,0,.646],83:[.716,.022,.556],84:[.688,0,.681],85:[.694,.022,.688],86:[.694,0,.667],87:[.694,0,.944],88:[.694,0,.667],89:[.694,0,.667],90:[.694,0,.611],91:[.75,.25,.289],93:[.75,.25,.289],94:[.694,-.527,.5],95:[-.038,.114,.5],97:[.46,.01,.481],98:[.694,.011,.517],99:[.46,.01,.444],100:[.694,.01,.517],101:[.461,.01,.444],102:[.705,0,.306],103:[.455,.206,.5],104:[.694,0,.517],105:[.68,0,.239],106:[.68,.205,.267],107:[.694,0,.489],108:[.694,0,.239],109:[.455,0,.794],110:[.455,0,.517],111:[.46,.01,.5],112:[.455,.194,.517],113:[.455,.194,.517],114:[.455,0,.342],115:[.46,.01,.383],116:[.571,.01,.361],117:[.444,.01,.517],118:[.444,0,.461],119:[.444,0,.683],120:[.444,0,.461],121:[.444,.204,.461],122:[.444,0,.435],126:[.327,-.193,.5],160:[0,0,.25],305:[.444,0,.239],567:[.444,.205,.267],768:[.694,-.527,0],769:[.694,-.527,0],770:[.694,-.527,0],771:[.677,-.543,0],772:[.631,-.552,0],774:[.694,-.508,0],775:[.68,-.576,0],776:[.68,-.582,0],778:[.694,-.527,0],779:[.694,-.527,0],780:[.654,-.487,0],913:[.694,0,.667],914:[.694,0,.667],915:[.691,0,.542],916:[.694,0,.833],917:[.691,0,.597],918:[.694,0,.611],919:[.694,0,.708],920:[.716,.021,.778],921:[.694,0,.278],922:[.694,0,.694],923:[.694,0,.611],924:[.694,0,.875],925:[.694,0,.708],926:[.688,0,.667],927:[.715,.022,.736],928:[.691,0,.708],929:[.694,0,.639],930:[.716,.021,.778],931:[.694,0,.722],932:[.688,0,.681],933:[.716,0,.778],934:[.694,0,.722],935:[.694,0,.667],936:[.694,0,.778],937:[.716,0,.722],978:[.716,0,.778],988:[.691,0,.569],8211:[.312,-.236,.5],8212:[.312,-.236,1],8213:[.312,-.236,1],8215:[-.038,.114,.5],8216:[.694,-.471,.278],8217:[.694,-.471,.278],8220:[.694,-.471,.5],8221:[.694,-.471,.5],8260:[.75,.25,.5],8710:[.694,0,.833]}},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(1),i=r(203);e.scriptBold=n.AddCSS(i.scriptBold,{32:{c:" "},65:{c:"A"},66:{c:"B"},67:{c:"C"},68:{c:"D"},69:{c:"E"},70:{c:"F"},71:{c:"G"},72:{c:"H"},73:{c:"I"},74:{c:"J"},75:{c:"K"},76:{c:"L"},77:{c:"M"},78:{c:"N"},79:{c:"O"},80:{c:"P"},81:{c:"Q"},82:{c:"R"},83:{c:"S"},84:{c:"T"},85:{c:"U"},86:{c:"V"},87:{c:"W"},88:{c:"X"},89:{c:"Y"},90:{c:"Z"},913:{c:"A",f:"B"},914:{c:"B",f:"B"},917:{c:"E",f:"B"},918:{c:"Z",f:"B"},919:{c:"H",f:"B"},921:{c:"I",f:"B"},922:{c:"K",f:"B"},924:{c:"M",f:"B"},925:{c:"N",f:"B"},927:{c:"O",f:"B"},929:{c:"P",f:"B"},930:{c:"\\398",f:"B"},932:{c:"T",f:"B"},935:{c:"X",f:"B"},978:{c:"\\3A5",f:"B"},988:{c:"F",f:"B"}})},function(t,e,r){"use 
strict";Object.defineProperty(e,"__esModule",{value:!0}),e.scriptBold={32:[0,0,.25],65:[.717,.008,.803,{ic:.213,sk:.389}],66:[.708,.028,.908,{sk:.194}],67:[.728,.026,.666,{ic:.153,sk:.278}],68:[.708,.031,.774,{ic:.081,sk:.111}],69:[.707,.008,.562,{ic:.156,sk:.139}],70:[.735,.036,.895,{ic:.095,sk:.222}],71:[.717,.037,.61,{ic:.128,sk:.25}],72:[.717,.036,.969,{ic:.272,sk:.333}],73:[.717,.017,.809,{ic:.137,sk:.333}],74:[.717,.314,1.052,{ic:.081,sk:.417}],75:[.717,.037,.914,{ic:.29,sk:.361}],76:[.717,.017,.874,{ic:.161,sk:.306}],77:[.721,.05,1.08,{ic:.136,sk:.444}],78:[.726,.036,.902,{ic:.306,sk:.389}],79:[.707,.008,.738,{ic:.067,sk:.167}],80:[.716,.037,1.013,{sk:.222}],81:[.717,.017,.883,{sk:.278}],82:[.717,.017,.85,{sk:.194}],83:[.708,.036,.868,{ic:.148,sk:.333}],84:[.735,.037,.747,{ic:.249,sk:.222}],85:[.717,.017,.8,{ic:.16,sk:.25}],86:[.717,.017,.622,{ic:.228,sk:.222}],87:[.717,.017,.805,{ic:.221,sk:.25}],88:[.717,.017,.944,{ic:.187,sk:.278}],89:[.716,.017,.71,{ic:.249,sk:.194}],90:[.717,.016,.821,{ic:.211,sk:.306}],160:[0,0,.25],913:[.698,0,.869],914:[.686,0,.818],917:[.68,0,.756],918:[.686,0,.703],919:[.686,0,.9],921:[.686,0,.436],922:[.686,0,.901],924:[.686,0,1.092],925:[.686,0,.9],927:[.696,.01,.864],929:[.686,0,.786],930:[.696,.01,.894],932:[.675,0,.8],935:[.686,0,.869],978:[.697,0,.894],988:[.68,0,.724]}},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(1),i=r(205);e.script=n.AddCSS(i.script,{32:{c:" "},65:{c:"A"},66:{c:"B"},67:{c:"C"},68:{c:"D"},69:{c:"E"},70:{c:"F"},71:{c:"G"},72:{c:"H"},73:{c:"I"},74:{c:"J"},75:{c:"K"},76:{c:"L"},77:{c:"M"},78:{c:"N"},79:{c:"O"},80:{c:"P"},81:{c:"Q"},82:{c:"R"},83:{c:"S"},84:{c:"T"},85:{c:"U"},86:{c:"V"},87:{c:"W"},88:{c:"X"},89:{c:"Y"},90:{c:"Z"},913:{c:"A",f:""},914:{c:"B",f:""},917:{c:"E",f:""},918:{c:"Z",f:""},919:{c:"H",f:""},921:{c:"I",f:""},922:{c:"K",f:""},924:{c:"M",f:""},925:{c:"N",f:""},927:{c:"O",f:""},929:{c:"P",f:""},930:{c:"\\398",f:""},932:{c:"T",f:""},935:{c:"X",f:""},978:{c:"\\3A5",f:""},988:{c:"F",f:""},8459:{c:"H",f:"SC"},8464:{c:"J",f:"SC"},8466:{c:"L",f:"SC"},8475:{c:"R",f:"SC"},8492:{c:"B",f:"SC"},8496:{c:"E",f:"SC"},8497:{c:"F",f:"SC"},8499:{c:"M",f:"SC"}})},function(t,e,r){"use 
strict";Object.defineProperty(e,"__esModule",{value:!0}),e.script={32:[0,0,.25],65:[.717,.008,.803,{ic:.213,sk:.389}],66:[.708,.028,.908,{sk:.194}],67:[.728,.026,.666,{ic:.153,sk:.278}],68:[.708,.031,.774,{ic:.081,sk:.111}],69:[.707,.008,.562,{ic:.156,sk:.139}],70:[.735,.036,.895,{ic:.095,sk:.222}],71:[.717,.037,.61,{ic:.128,sk:.25}],72:[.717,.036,.969,{ic:.272,sk:.333}],73:[.717,.017,.809,{ic:.137,sk:.333}],74:[.717,.314,1.052,{ic:.081,sk:.417}],75:[.717,.037,.914,{ic:.29,sk:.361}],76:[.717,.017,.874,{ic:.161,sk:.306}],77:[.721,.05,1.08,{ic:.136,sk:.444}],78:[.726,.036,.902,{ic:.306,sk:.389}],79:[.707,.008,.738,{ic:.067,sk:.167}],80:[.716,.037,1.013,{sk:.222}],81:[.717,.017,.883,{sk:.278}],82:[.717,.017,.85,{sk:.194}],83:[.708,.036,.868,{ic:.148,sk:.333}],84:[.735,.037,.747,{ic:.249,sk:.222}],85:[.717,.017,.8,{ic:.16,sk:.25}],86:[.717,.017,.622,{ic:.228,sk:.222}],87:[.717,.017,.805,{ic:.221,sk:.25}],88:[.717,.017,.944,{ic:.187,sk:.278}],89:[.716,.017,.71,{ic:.249,sk:.194}],90:[.717,.016,.821,{ic:.211,sk:.306}],160:[0,0,.25],913:[.716,0,.75],914:[.683,0,.708],917:[.68,0,.681],918:[.683,0,.611],919:[.683,0,.75],921:[.683,0,.361],922:[.683,0,.778],924:[.683,0,.917],925:[.683,0,.75],927:[.705,.022,.778],929:[.683,0,.681],930:[.705,.022,.778],932:[.677,0,.722],935:[.683,0,.75],978:[.705,0,.778],988:[.68,0,.653],8459:[.717,.036,.969,{ic:.272,sk:.333}],8464:[.717,.314,1.052,{ic:.081,sk:.417}],8466:[.717,.017,.874,{ic:.161,sk:.306}],8475:[.717,.017,.85,{sk:.194}],8492:[.708,.028,.908,{sk:.194}],8496:[.707,.008,.562,{ic:.156,sk:.139}],8497:[.735,.036,.895,{ic:.095,sk:.222}],8499:[.721,.05,1.08,{ic:.136,sk:.444}]}},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(1),i=r(207);e.smallop=n.AddCSS(i.smallop,{32:{c:" "},40:{c:"("},41:{c:")"},47:{c:"/"},91:{c:"["},93:{c:"]"},123:{c:"{"},125:{c:"}"},8260:{c:"/"},9001:{c:"\\27E8"},9002:{c:"\\27E9"},10072:{c:"\\2223"},10764:{c:"\\222C\\222C"},12296:{c:"\\27E8"},12297:{c:"\\27E9"}})},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.smallop={32:[0,0,.25],40:[.85,.349,.458],41:[.85,.349,.458],47:[.85,.349,.578],91:[.85,.349,.417],92:[.85,.349,.578],93:[.85,.349,.417],123:[.85,.349,.583],125:[.85,.349,.583],160:[0,0,.25],710:[.744,-.551,.556],732:[.722,-.597,.556],770:[.744,-.551,0],771:[.722,-.597,0],8214:[.602,0,.778],8260:[.85,.349,.578],8593:[.6,0,.667],8595:[.6,0,.667],8657:[.599,0,.778],8659:[.6,0,.778],8719:[.75,.25,.944],8720:[.75,.25,.944],8721:[.75,.25,1.056],8730:[.85,.35,1],8739:[.627,.015,.333],8741:[.627,.015,.556],8747:[.805,.306,.472,{ic:.138}],8748:[.805,.306,.819,{ic:.138}],8749:[.805,.306,1.166,{ic:.138}],8750:[.805,.306,.472,{ic:.138}],8896:[.75,.249,.833],8897:[.75,.249,.833],8898:[.75,.249,.833],8899:[.75,.249,.833],8968:[.85,.349,.472],8969:[.85,.349,.472],8970:[.85,.349,.472],8971:[.85,.349,.472],9001:[.85,.35,.472],9002:[.85,.35,.472],9168:[.602,0,.667],10072:[.627,.015,.333],10216:[.85,.35,.472],10217:[.85,.35,.472],10752:[.75,.25,1.111],10753:[.75,.25,1.111],10754:[.75,.25,1.111],10756:[.75,.249,.833],10758:[.75,.249,.833],10764:[.805,.306,1.638,{ic:.138}],12296:[.85,.35,.472],12297:[.85,.35,.472]}},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(1),i=r(209);e.texCalligraphicBold=n.AddCSS(i.texCalligraphicBold,{32:{c:" 
"},47:{c:"/"},48:{c:"0"},49:{c:"1"},50:{c:"2"},51:{c:"3"},52:{c:"4"},53:{c:"5"},54:{c:"6"},55:{c:"7"},56:{c:"8"},57:{c:"9"},65:{c:"A"},66:{c:"B"},67:{c:"C"},68:{c:"D"},69:{c:"E"},70:{c:"F"},71:{c:"G"},72:{c:"H"},73:{c:"I"},74:{c:"J"},75:{c:"K"},76:{c:"L"},77:{c:"M"},78:{c:"N"},79:{c:"O"},80:{c:"P"},81:{c:"Q"},82:{c:"R"},83:{c:"S"},84:{c:"T"},85:{c:"U"},86:{c:"V"},87:{c:"W"},88:{c:"X"},89:{c:"Y"},90:{c:"Z"},97:{c:"a"},98:{c:"b"},99:{c:"c"},100:{c:"d"},101:{c:"e"},102:{c:"f"},103:{c:"g"},104:{c:"h"},105:{c:"i"},106:{c:"j"},107:{c:"k"},108:{c:"l"},109:{c:"m"},110:{c:"n"},111:{c:"o"},112:{c:"p"},113:{c:"q"},114:{c:"r"},115:{c:"s"},116:{c:"t"},117:{c:"u"},118:{c:"v"},119:{c:"w"},120:{c:"x"},121:{c:"y"},122:{c:"z"},913:{c:"A",f:"BI"},914:{c:"B",f:"BI"},917:{c:"E",f:"BI"},918:{c:"Z",f:"BI"},919:{c:"H",f:"BI"},921:{c:"I",f:"BI"},922:{c:"K",f:"BI"},924:{c:"M",f:"BI"},925:{c:"N",f:"BI"},927:{c:"O",f:"BI"},929:{c:"P",f:"BI"},930:{c:"\\398",f:"BI"},932:{c:"T",f:"BI"},935:{c:"X",f:"BI"},978:{c:"\\3A5",f:"BI"},988:{c:"F",f:"BI"},8260:{c:"/"},8710:{c:"\\394"}})},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.texCalligraphicBold={32:[0,0,.25],47:[.711,.21,.894],48:[.46,.017,.575],49:[.461,0,.575],50:[.46,0,.575],51:[.461,.211,.575],52:[.469,.194,.575],53:[.461,.211,.575],54:[.66,.017,.575],55:[.476,.211,.575],56:[.661,.017,.575],57:[.461,.21,.575],65:[.751,.049,.921,{ic:.068,sk:.224}],66:[.705,.017,.748,{sk:.16}],67:[.703,.02,.613,{sk:.16}],68:[.686,0,.892,{sk:.0958}],69:[.703,.016,.607,{sk:.128}],70:[.686,.03,.814,{ic:.116,sk:.128}],71:[.703,.113,.682,{sk:.128}],72:[.686,.048,.987,{sk:.128}],73:[.686,0,.642,{ic:.104,sk:.0319}],74:[.686,.114,.779,{ic:.158,sk:.192}],75:[.703,.017,.871,{sk:.0639}],76:[.703,.017,.788,{sk:.16}],77:[.703,.049,1.378,{sk:.16}],78:[.84,.049,.937,{ic:.168,sk:.0958}],79:[.703,.017,.906,{sk:.128}],80:[.686,.067,.81,{sk:.0958}],81:[.703,.146,.939,{sk:.128}],82:[.686,.017,.99,{sk:.0958}],83:[.703,.016,.696,{sk:.16}],84:[.72,.069,.644,{ic:.303,sk:.0319}],85:[.686,.024,.715,{ic:.056,sk:.0958}],86:[.686,.077,.737,{sk:.0319}],87:[.686,.077,1.169,{sk:.0958}],88:[.686,0,.817,{ic:.089,sk:.16}],89:[.686,.164,.759,{sk:.0958}],90:[.686,0,.818,{sk:.16}],97:[.452,.008,.633],98:[.694,.008,.521],99:[.451,.008,.513,{sk:.0639}],100:[.694,.008,.61,{sk:.192}],101:[.452,.008,.554,{sk:.0639}],102:[.701,.201,.568,{ic:.056,sk:.192}],103:[.452,.202,.545,{sk:.0319}],104:[.694,.008,.668,{sk:-.0319}],105:[.694,.008,.405],106:[.694,.202,.471],107:[.694,.008,.604],108:[.694,.008,.348,{sk:.0958}],109:[.452,.008,1.032],110:[.452,.008,.713],111:[.452,.008,.585,{sk:.0639}],112:[.452,.194,.601,{sk:.0958}],113:[.452,.194,.542,{sk:.0958}],114:[.452,.008,.529,{sk:.0639}],115:[.451,.008,.531,{sk:.0639}],116:[.643,.007,.415,{sk:.0958}],117:[.452,.008,.681,{sk:.0319}],118:[.453,.008,.567,{sk:.0319}],119:[.453,.008,.831,{sk:.0958}],120:[.452,.008,.659,{sk:.0319}],121:[.452,.202,.59,{sk:.0639}],122:[.452,.008,.555,{sk:.0639}],160:[0,0,.25],913:[.711,0,.869,{sk:.16}],914:[.686,0,.866,{sk:.0958}],915:[.68,0,.657,{ic:.12,sk:.0958}],916:[.711,0,.958,{sk:.192}],917:[.68,0,.81,{sk:.0958}],918:[.686,0,.773,{sk:.0958}],919:[.686,0,.982,{sk:.0639}],920:[.702,.017,.867,{sk:.0958}],921:[.686,0,.511,{ic:.062,sk:.128}],922:[.686,0,.971,{sk:.0639}],923:[.711,0,.806,{sk:.192}],924:[.686,0,1.142,{ic:.077,sk:.0958}],925:[.686,0,.95,{ic:.077,sk:.0958}],926:[.675,0,.841,{sk:.0958}],927:[.703,.017,.837,{sk:.0958}],928:[.68,0,.982,{sk:.0639}],929:[.686,0,.723,{ic:.124,sk:.0958}],930:[.702,.017,.867,{sk:.0958}],931
:[.686,0,.885,{sk:.0958}],932:[.675,0,.637,{ic:.135,sk:.0958}],933:[.703,0,.671,{ic:.131,sk:.0639}],934:[.686,0,.767,{sk:.0958}],935:[.686,0,.947,{sk:.0958}],936:[.686,0,.714,{ic:.076,sk:.0639}],937:[.703,0,.879,{sk:.0958}],945:[.452,.008,.761,{sk:.0319}],946:[.701,.194,.66,{sk:.0958}],947:[.451,.211,.59],948:[.725,.008,.522,{sk:.0639}],949:[.461,.017,.529,{sk:.0958}],950:[.711,.202,.508,{sk:.0958}],951:[.452,.211,.6,{sk:.0639}],952:[.702,.008,.562,{sk:.0958}],953:[.452,.008,.412,{sk:.0639}],954:[.452,.008,.668],955:[.694,.013,.671],956:[.452,.211,.708,{sk:.0319}],957:[.452,0,.577,{sk:.0319}],958:[.711,.201,.508,{sk:.128}],959:[.452,.008,.585,{sk:.0639}],960:[.444,.008,.682],961:[.451,.211,.612,{sk:.0958}],962:[.451,.105,.424,{sk:.0958}],963:[.444,.008,.686],964:[.444,.013,.521,{ic:.089,sk:.0319}],965:[.453,.008,.631,{sk:.0319}],966:[.452,.216,.747,{sk:.0958}],967:[.452,.201,.718,{sk:.0639}],968:[.694,.202,.758,{sk:.128}],969:[.453,.008,.718],977:[.701,.008,.692,{sk:.0958}],978:[.703,0,.671,{ic:.131,sk:.0639}],981:[.694,.202,.712,{sk:.0958}],982:[.444,.008,.975],988:[.68,0,.689,{ic:.12,sk:.0958}],1009:[.451,.194,.612,{sk:.0958}],1013:[.444,.007,.483,{sk:.0639}],8260:[.711,.21,.894],8710:[.711,0,.958,{sk:.192}]}},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(1),i=r(211);e.texCalligraphic=n.AddCSS(i.texCalligraphic,{32:{c:" "},48:{c:"0"},49:{c:"1"},50:{c:"2"},51:{c:"3"},52:{c:"4"},53:{c:"5"},54:{c:"6"},55:{c:"7"},56:{c:"8"},57:{c:"9"},65:{c:"A"},66:{c:"B"},67:{c:"C"},68:{c:"D"},69:{c:"E"},70:{c:"F"},71:{c:"G"},72:{c:"H"},73:{c:"I"},74:{c:"J"},75:{c:"K"},76:{c:"L"},77:{c:"M"},78:{c:"N"},79:{c:"O"},80:{c:"P"},81:{c:"Q"},82:{c:"R"},83:{c:"S"},84:{c:"T"},85:{c:"U"},86:{c:"V"},87:{c:"W"},88:{c:"X"},89:{c:"Y"},90:{c:"Z"},913:{c:"A",f:"I"},914:{c:"B",f:"I"},917:{c:"E",f:"I"},918:{c:"Z",f:"I"},919:{c:"H",f:"I"},921:{c:"I",f:"I"},922:{c:"K",f:"I"},924:{c:"M",f:"I"},925:{c:"N",f:"I"},927:{c:"O",f:"I"},929:{c:"P",f:"I"},930:{c:"\\398",f:"I"},932:{c:"T",f:"I"},935:{c:"X",f:"I"},978:{c:"\\3A5",f:"I"},988:{c:"F",f:"I"}})},function(t,e,r){"use 
strict";Object.defineProperty(e,"__esModule",{value:!0}),e.texCalligraphic={32:[0,0,.25],48:[.452,.022,.5],49:[.453,0,.5],50:[.453,0,.5],51:[.452,.216,.5],52:[.464,.194,.5],53:[.453,.216,.5],54:[.665,.022,.5],55:[.463,.216,.5],56:[.666,.021,.5],57:[.453,.216,.5],65:[.728,.05,.798,{sk:.194}],66:[.705,.022,.657,{sk:.139}],67:[.705,.025,.527,{sk:.139}],68:[.683,0,.771,{sk:.0833}],69:[.705,.022,.528,{sk:.111}],70:[.683,.032,.719,{ic:.11,sk:.111}],71:[.704,.119,.595,{sk:.111}],72:[.683,.048,.845,{sk:.111}],73:[.683,0,.545,{ic:.097,sk:.0278}],74:[.683,.119,.678,{ic:.161,sk:.167}],75:[.705,.022,.762,{sk:.0556}],76:[.705,.022,.69,{sk:.139}],77:[.705,.05,1.201,{sk:.139}],78:[.789,.05,.82,{ic:.159,sk:.0833}],79:[.705,.022,.796,{sk:.111}],80:[.683,.057,.696,{sk:.0833}],81:[.705,.131,.817,{sk:.111}],82:[.682,.022,.848,{sk:.0833}],83:[.705,.022,.606,{sk:.139}],84:[.717,.068,.545,{ic:.288,sk:.0278}],85:[.683,.028,.626,{ic:.061,sk:.0833}],86:[.683,.052,.613,{sk:.0278}],87:[.683,.053,.988,{sk:.0833}],88:[.683,0,.713,{ic:.094,sk:.139}],89:[.683,.143,.668,{sk:.0833}],90:[.683,0,.725,{sk:.139}],160:[0,0,.25],913:[.716,0,.75,{sk:.139}],914:[.683,0,.759,{sk:.0833}],917:[.68,0,.738,{sk:.0833}],918:[.683,0,.683,{sk:.0833}],919:[.683,0,.831,{ic:.057,sk:.0556}],921:[.683,0,.44,{ic:.064,sk:.111}],922:[.683,0,.849,{sk:.0556}],924:[.683,0,.97,{ic:.081,sk:.0833}],925:[.683,0,.803,{ic:.085,sk:.0833}],927:[.704,.022,.763,{sk:.0833}],929:[.683,0,.642,{ic:.109,sk:.0833}],930:[.704,.022,.763,{sk:.0833}],932:[.677,0,.584,{ic:.12,sk:.0833}],935:[.683,0,.828,{sk:.0833}],978:[.705,0,.583,{ic:.117,sk:.0556}],988:[.68,0,.643,{ic:.106,sk:.0833}]}},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(1),i=r(213);e.texMathit=n.AddCSS(i.texMathit,{32:{c:" "},33:{c:"!"},35:{c:"#"},37:{c:"%"},38:{c:"&"},40:{c:"("},41:{c:")"},42:{c:"*"},43:{c:"+"},44:{c:","},45:{c:"-"},46:{c:"."},47:{c:"/"},48:{c:"0"},49:{c:"1"},50:{c:"2"},51:{c:"3"},52:{c:"4"},53:{c:"5"},54:{c:"6"},55:{c:"7"},56:{c:"8"},57:{c:"9"},58:{c:":"},59:{c:";"},61:{c:"="},63:{c:"?"},64:{c:"@"},65:{c:"A"},66:{c:"B"},67:{c:"C"},68:{c:"D"},69:{c:"E"},70:{c:"F"},71:{c:"G"},72:{c:"H"},73:{c:"I"},74:{c:"J"},75:{c:"K"},76:{c:"L"},77:{c:"M"},78:{c:"N"},79:{c:"O"},80:{c:"P"},81:{c:"Q"},82:{c:"R"},83:{c:"S"},84:{c:"T"},85:{c:"U"},86:{c:"V"},87:{c:"W"},88:{c:"X"},89:{c:"Y"},90:{c:"Z"},91:{c:"["},93:{c:"]"},94:{c:"^"},95:{c:"_"},97:{c:"a"},98:{c:"b"},99:{c:"c"},100:{c:"d"},101:{c:"e"},102:{c:"f"},103:{c:"g"},104:{c:"h"},105:{c:"i"},106:{c:"j"},107:{c:"k"},108:{c:"l"},109:{c:"m"},110:{c:"n"},111:{c:"o"},112:{c:"p"},113:{c:"q"},114:{c:"r"},115:{c:"s"},116:{c:"t"},117:{c:"u"},118:{c:"v"},119:{c:"w"},120:{c:"x"},121:{c:"y"},122:{c:"z"},126:{c:"~"},913:{c:"A"},914:{c:"B"},917:{c:"E"},918:{c:"Z"},919:{c:"H"},921:{c:"I"},922:{c:"K"},924:{c:"M"},925:{c:"N"},927:{c:"O"},929:{c:"P"},930:{c:"\\398"},932:{c:"T"},935:{c:"X"},978:{c:"\\3A5"},988:{c:"F"},8213:{c:"\\2014"},8215:{c:"_"},8260:{c:"/"},8710:{c:"\\394"}})},function(t,e,r){"use 
strict";Object.defineProperty(e,"__esModule",{value:!0}),e.texMathit={32:[0,0,.25],33:[.716,0,.307,{ic:.073}],34:[.694,-.379,.514],35:[.694,.194,.818],37:[.75,.056,.818],38:[.716,.022,.767],39:[.694,-.379,.307,{ic:.07}],40:[.75,.25,.409,{ic:.108}],41:[.75,.25,.409],42:[.75,-.32,.511,{ic:.073}],43:[.557,.057,.767],44:[.121,.194,.307],45:[.251,-.18,.358],46:[.121,0,.307],47:[.75,.25,.511,{ic:.106}],48:[.665,.021,.511,{ic:.051}],49:[.666,0,.511],50:[.666,.022,.511],51:[.666,.022,.511,{ic:.051}],52:[.666,.194,.511],53:[.666,.022,.511,{ic:.056}],54:[.665,.022,.511,{ic:.054}],55:[.666,.022,.511,{ic:.123}],56:[.666,.021,.511],57:[.666,.022,.511],58:[.431,0,.307],59:[.431,.194,.307],61:[.367,-.133,.767],63:[.716,0,.511],64:[.705,.011,.767],65:[.716,0,.743],66:[.683,0,.704],67:[.705,.021,.716,{ic:.096}],68:[.683,0,.755],69:[.68,0,.678,{ic:.065}],70:[.68,0,.653,{ic:.078}],71:[.705,.022,.774],72:[.683,0,.743,{ic:.117}],73:[.683,0,.386,{ic:.122}],74:[.683,.021,.525,{ic:.097}],75:[.683,0,.769,{ic:.09}],76:[.683,0,.627],77:[.683,0,.897,{ic:.113}],78:[.683,0,.743,{ic:.117}],79:[.704,.022,.767],80:[.683,0,.678,{ic:.051}],81:[.704,.194,.767],82:[.683,.022,.729],83:[.705,.022,.562,{ic:.071}],84:[.677,0,.716,{ic:.09}],85:[.683,.022,.743,{ic:.117}],86:[.683,.022,.743,{ic:.125}],87:[.683,.022,.999,{ic:.125}],88:[.683,0,.743,{ic:.082}],89:[.683,0,.743,{ic:.132}],90:[.683,0,.613,{ic:.091}],91:[.75,.25,.307,{ic:.139}],93:[.75,.25,.307,{ic:.052}],94:[.694,-.527,.511],95:[-.025,.062,.511],97:[.442,.011,.511],98:[.694,.011,.46],99:[.441,.01,.46],100:[.694,.011,.511,{ic:.056}],101:[.442,.01,.46],102:[.705,.204,.307,{ic:.143}],103:[.442,.205,.46],104:[.694,.011,.511],105:[.656,.01,.307],106:[.656,.204,.307,{ic:.057}],107:[.694,.011,.46],108:[.694,.011,.256,{ic:.056}],109:[.442,.011,.818],110:[.442,.011,.562],111:[.442,.011,.511],112:[.442,.194,.511],113:[.442,.194,.46],114:[.442,.011,.422,{ic:.062}],115:[.442,.011,.409],116:[.626,.011,.332],117:[.441,.011,.537],118:[.443,.01,.46],119:[.443,.011,.664],120:[.442,.011,.464],121:[.441,.205,.486],122:[.442,.011,.409,{ic:.057}],126:[.318,-.208,.511,{ic:.06}],160:[0,0,.25],163:[.714,.011,.769],305:[.441,.01,.307],567:[.442,.204,.332],768:[.697,-.5,0],769:[.697,-.5,0],770:[.694,-.527,0],771:[.668,-.558,0,{ic:.06}],772:[.589,-.544,0,{ic:.054}],774:[.694,-.515,0,{ic:.062}],775:[.669,-.548,0],776:[.669,-.554,0],778:[.716,-.542,0],779:[.697,-.503,0,{ic:.065}],780:[.638,-.502,0],913:[.716,0,.743],914:[.683,0,.704],915:[.68,0,.627,{ic:.078}],916:[.716,0,.818],917:[.68,0,.678,{ic:.065}],918:[.683,0,.613,{ic:.091}],919:[.683,0,.743,{ic:.117}],920:[.704,.022,.767],921:[.683,0,.386,{ic:.122}],922:[.683,0,.769,{ic:.09}],923:[.716,0,.692],924:[.683,0,.897,{ic:.113}],925:[.683,0,.743,{ic:.117}],926:[.677,0,.664,{ic:.09}],927:[.704,.022,.767],928:[.68,0,.743,{ic:.116}],929:[.683,0,.678,{ic:.051}],930:[.704,.022,.767],931:[.683,0,.716,{ic:.066}],932:[.677,0,.716,{ic:.09}],933:[.705,0,.767,{ic:.065}],934:[.683,0,.716],935:[.683,0,.743,{ic:.082}],936:[.683,0,.767,{ic:.057}],937:[.705,0,.716],978:[.705,0,.767,{ic:.065}],988:[.68,0,.653,{ic:.078}],8211:[.285,-.248,.511],8212:[.285,-.248,1.022],8213:[.285,-.248,1.022],8215:[-.025,.062,.511],8216:[.694,-.379,.307,{ic:.055}],8217:[.694,-.379,.307,{ic:.07}],8220:[.694,-.379,.514,{ic:.092}],8221:[.694,-.379,.514],8260:[.75,.25,.511,{ic:.106}],8463:[.695,.013,.54],8710:[.716,0,.818]}},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(1),i=r(215);e.texOldstyleBold=n.AddCSS(i.texOldstyleBold,{32:{c:" 
"},48:{c:"0"},49:{c:"1"},50:{c:"2"},51:{c:"3"},52:{c:"4"},53:{c:"5"},54:{c:"6"},55:{c:"7"},56:{c:"8"},57:{c:"9"},65:{c:"A"},66:{c:"B"},67:{c:"C"},68:{c:"D"},69:{c:"E"},70:{c:"F"},71:{c:"G"},72:{c:"H"},73:{c:"I"},74:{c:"J"},75:{c:"K"},76:{c:"L"},77:{c:"M"},78:{c:"N"},79:{c:"O"},80:{c:"P"},81:{c:"Q"},82:{c:"R"},83:{c:"S"},84:{c:"T"},85:{c:"U"},86:{c:"V"},87:{c:"W"},88:{c:"X"},89:{c:"Y"},90:{c:"Z"},913:{c:"A",f:"B"},914:{c:"B",f:"B"},917:{c:"E",f:"B"},918:{c:"Z",f:"B"},919:{c:"H",f:"B"},921:{c:"I",f:"B"},922:{c:"K",f:"B"},924:{c:"M",f:"B"},925:{c:"N",f:"B"},927:{c:"O",f:"B"},929:{c:"P",f:"B"},930:{c:"\\398",f:"B"},932:{c:"T",f:"B"},935:{c:"X",f:"B"},978:{c:"\\3A5",f:"B"},988:{c:"F",f:"B"}})},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.texOldstyleBold={32:[0,0,.25],48:[.46,.017,.575],49:[.461,0,.575],50:[.46,0,.575],51:[.461,.211,.575],52:[.469,.194,.575],53:[.461,.211,.575],54:[.66,.017,.575],55:[.476,.211,.575],56:[.661,.017,.575],57:[.461,.21,.575],65:[.751,.049,.921,{ic:.068,sk:.224}],66:[.705,.017,.748,{sk:.16}],67:[.703,.02,.613,{sk:.16}],68:[.686,0,.892,{sk:.0958}],69:[.703,.016,.607,{sk:.128}],70:[.686,.03,.814,{ic:.116,sk:.128}],71:[.703,.113,.682,{sk:.128}],72:[.686,.048,.987,{sk:.128}],73:[.686,0,.642,{ic:.104,sk:.0319}],74:[.686,.114,.779,{ic:.158,sk:.192}],75:[.703,.017,.871,{sk:.0639}],76:[.703,.017,.788,{sk:.16}],77:[.703,.049,1.378,{sk:.16}],78:[.84,.049,.937,{ic:.168,sk:.0958}],79:[.703,.017,.906,{sk:.128}],80:[.686,.067,.81,{sk:.0958}],81:[.703,.146,.939,{sk:.128}],82:[.686,.017,.99,{sk:.0958}],83:[.703,.016,.696,{sk:.16}],84:[.72,.069,.644,{ic:.303,sk:.0319}],85:[.686,.024,.715,{ic:.056,sk:.0958}],86:[.686,.077,.737,{sk:.0319}],87:[.686,.077,1.169,{sk:.0958}],88:[.686,0,.817,{ic:.089,sk:.16}],89:[.686,.164,.759,{sk:.0958}],90:[.686,0,.818,{sk:.16}],160:[0,0,.25],913:[.698,0,.869],914:[.686,0,.818],917:[.68,0,.756],918:[.686,0,.703],919:[.686,0,.9],921:[.686,0,.436],922:[.686,0,.901],924:[.686,0,1.092],925:[.686,0,.9],927:[.696,.01,.864],929:[.686,0,.786],930:[.696,.01,.894],932:[.675,0,.8],935:[.686,0,.869],978:[.697,0,.894],988:[.68,0,.724]}},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(1),i=r(217);e.texOldstyle=n.AddCSS(i.texOldstyle,{32:{c:" "},48:{c:"0"},49:{c:"1"},50:{c:"2"},51:{c:"3"},52:{c:"4"},53:{c:"5"},54:{c:"6"},55:{c:"7"},56:{c:"8"},57:{c:"9"},65:{c:"A"},66:{c:"B"},67:{c:"C"},68:{c:"D"},69:{c:"E"},70:{c:"F"},71:{c:"G"},72:{c:"H"},73:{c:"I"},74:{c:"J"},75:{c:"K"},76:{c:"L"},77:{c:"M"},78:{c:"N"},79:{c:"O"},80:{c:"P"},81:{c:"Q"},82:{c:"R"},83:{c:"S"},84:{c:"T"},85:{c:"U"},86:{c:"V"},87:{c:"W"},88:{c:"X"},89:{c:"Y"},90:{c:"Z"},913:{c:"A",f:""},914:{c:"B",f:""},917:{c:"E",f:""},918:{c:"Z",f:""},919:{c:"H",f:""},921:{c:"I",f:""},922:{c:"K",f:""},924:{c:"M",f:""},925:{c:"N",f:""},927:{c:"O",f:""},929:{c:"P",f:""},930:{c:"\\398",f:""},932:{c:"T",f:""},935:{c:"X",f:""},978:{c:"\\3A5",f:""},988:{c:"F",f:""}})},function(t,e,r){"use 
strict";Object.defineProperty(e,"__esModule",{value:!0}),e.texOldstyle={32:[0,0,.25],48:[.452,.022,.5],49:[.453,0,.5],50:[.453,0,.5],51:[.452,.216,.5],52:[.464,.194,.5],53:[.453,.216,.5],54:[.665,.022,.5],55:[.463,.216,.5],56:[.666,.021,.5],57:[.453,.216,.5],65:[.728,.05,.798,{sk:.194}],66:[.705,.022,.657,{sk:.139}],67:[.705,.025,.527,{sk:.139}],68:[.683,0,.771,{sk:.0833}],69:[.705,.022,.528,{sk:.111}],70:[.683,.032,.719,{ic:.11,sk:.111}],71:[.704,.119,.595,{sk:.111}],72:[.683,.048,.845,{sk:.111}],73:[.683,0,.545,{ic:.097,sk:.0278}],74:[.683,.119,.678,{ic:.161,sk:.167}],75:[.705,.022,.762,{sk:.0556}],76:[.705,.022,.69,{sk:.139}],77:[.705,.05,1.201,{sk:.139}],78:[.789,.05,.82,{ic:.159,sk:.0833}],79:[.705,.022,.796,{sk:.111}],80:[.683,.057,.696,{sk:.0833}],81:[.705,.131,.817,{sk:.111}],82:[.682,.022,.848,{sk:.0833}],83:[.705,.022,.606,{sk:.139}],84:[.717,.068,.545,{ic:.288,sk:.0278}],85:[.683,.028,.626,{ic:.061,sk:.0833}],86:[.683,.052,.613,{sk:.0278}],87:[.683,.053,.988,{sk:.0833}],88:[.683,0,.713,{ic:.094,sk:.139}],89:[.683,.143,.668,{sk:.0833}],90:[.683,0,.725,{sk:.139}],160:[0,0,.25],913:[.716,0,.75],914:[.683,0,.708],917:[.68,0,.681],918:[.683,0,.611],919:[.683,0,.75],921:[.683,0,.361],922:[.683,0,.778],924:[.683,0,.917],925:[.683,0,.75],927:[.705,.022,.778],929:[.683,0,.681],930:[.705,.022,.778],932:[.677,0,.722],935:[.683,0,.75],978:[.705,0,.778],988:[.68,0,.653]}},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(1),i=r(219);e.texSize3=n.AddCSS(i.texSize3,{32:{c:" "},40:{c:"("},41:{c:")"},47:{c:"/"},91:{c:"["},93:{c:"]"},123:{c:"{"},125:{c:"}"},8260:{c:"/"},9001:{c:"\\27E8"},9002:{c:"\\27E9"},12296:{c:"\\27E8"},12297:{c:"\\27E9"}})},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.texSize3={32:[0,0,.25],40:[1.45,.949,.736],41:[1.45,.949,.736],47:[1.45,.949,1.044],91:[1.45,.949,.528],92:[1.45,.949,1.044],93:[1.45,.949,.528],123:[1.45,.949,.75],125:[1.45,.949,.75],160:[0,0,.25],710:[.772,-.564,1.444],732:[.749,-.61,1.444],770:[.772,-.564,0],771:[.749,-.61,0],8260:[1.45,.949,1.044],8730:[1.45,.95,1],8968:[1.45,.949,.583],8969:[1.45,.949,.583],8970:[1.45,.949,.583],8971:[1.45,.949,.583],9001:[1.45,.95,.75],9002:[1.45,.949,.75],10216:[1.45,.95,.75],10217:[1.45,.949,.75],12296:[1.45,.95,.75],12297:[1.45,.949,.75]}},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(1),i=r(221);e.texSize4=n.AddCSS(i.texSize4,{32:{c:" "},40:{c:"("},41:{c:")"},47:{c:"/"},91:{c:"["},93:{c:"]"},123:{c:"{"},125:{c:"}"},8260:{c:"/"},9001:{c:"\\27E8"},9002:{c:"\\27E9"},12296:{c:"\\27E8"},12297:{c:"\\27E9"},57685:{c:"\\E153\\E152"},57686:{c:"\\E151\\E150"}})},function(t,e,r){"use 
strict";Object.defineProperty(e,"__esModule",{value:!0}),e.texSize4={32:[0,0,.25],40:[1.75,1.249,.792],41:[1.75,1.249,.792],47:[1.75,1.249,1.278],91:[1.75,1.249,.583],92:[1.75,1.249,1.278],93:[1.75,1.249,.583],123:[1.75,1.249,.806],125:[1.75,1.249,.806],160:[0,0,.25],710:[.845,-.561,1.889],732:[.823,-.583,1.889],770:[.845,-.561,0],771:[.823,-.583,0],8260:[1.75,1.249,1.278],8730:[1.75,1.25,1],8968:[1.75,1.249,.639],8969:[1.75,1.249,.639],8970:[1.75,1.249,.639],8971:[1.75,1.249,.639],9001:[1.75,1.248,.806],9002:[1.75,1.248,.806],9115:[1.154,.655,.875],9116:[.61,.01,.875],9117:[1.165,.644,.875],9118:[1.154,.655,.875],9119:[.61,.01,.875],9120:[1.165,.644,.875],9121:[1.154,.645,.667],9122:[.602,0,.667],9123:[1.155,.644,.667],9124:[1.154,.645,.667],9125:[.602,0,.667],9126:[1.155,.644,.667],9127:[.899,.01,.889],9128:[1.16,.66,.889],9129:[.01,.899,.889],9130:[.29,.015,.889],9131:[.899,.01,.889],9132:[1.16,.66,.889],9133:[.01,.899,.889],9143:[.935,.885,1.056],10216:[1.75,1.248,.806],10217:[1.75,1.248,.806],12296:[1.75,1.248,.806],12297:[1.75,1.248,.806],57344:[.625,.014,1.056],57345:[.605,.014,1.056],57680:[.12,.213,.45],57681:[.12,.213,.45],57682:[.333,0,.45],57683:[.333,0,.45],57684:[.32,.2,.4],57685:[.333,0,.9],57686:[.12,.213,.9]}},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(1),i=r(223);e.texVariant=n.AddCSS(i.texVariant,{32:{c:" "},65:{c:"A"},66:{c:"B"},67:{c:"C"},68:{c:"D"},69:{c:"E"},70:{c:"F"},71:{c:"G"},72:{c:"H"},73:{c:"I"},74:{c:"J"},75:{c:"K"},76:{c:"L"},77:{c:"M"},78:{c:"N"},79:{c:"O"},80:{c:"P"},81:{c:"Q"},82:{c:"R"},83:{c:"S"},84:{c:"T"},85:{c:"U"},86:{c:"V"},87:{c:"W"},88:{c:"X"},89:{c:"Y"},90:{c:"Z"},107:{c:"k"},988:{c:"\\E008"},1008:{c:"\\E009"},8463:{f:""},8726:{f:""},8740:{c:"\\E006"},8742:{c:"\\E007"},8808:{c:"\\E00C"},8809:{c:"\\E00D"},8816:{c:"\\E011"},8817:{c:"\\E00E"},8840:{c:"\\E016"},8841:{c:"\\E018"},8842:{c:"\\E01A"},8843:{c:"\\E01B"},10887:{c:"\\E010"},10888:{c:"\\E00F"},10955:{c:"\\E017"},10956:{c:"\\E019"}})},function(t,e,r){"use 
strict";Object.defineProperty(e,"__esModule",{value:!0}),e.texVariant={32:[0,0,.25],65:[.701,0,.722],66:[.683,0,.667],67:[.702,.019,.722],68:[.683,0,.722],69:[.683,0,.667],70:[.683,0,.611],71:[.702,.019,.778],72:[.683,0,.778],73:[.683,0,.389],74:[.683,.077,.5],75:[.683,0,.778],76:[.683,0,.667],77:[.683,0,.944],78:[.683,.02,.722],79:[.701,.019,.778],80:[.683,0,.611],81:[.701,.181,.778],82:[.683,0,.722],83:[.702,.012,.556],84:[.683,0,.667],85:[.683,.019,.722],86:[.683,.02,.722],87:[.683,.019,1],88:[.683,0,.722],89:[.683,0,.722],90:[.683,0,.667],107:[.683,0,.556],160:[0,0,.25],165:[.683,0,.75],174:[.709,.175,.947],240:[.749,.021,.556],295:[.695,.013,.54],710:[.845,-.561,2.333],732:[.899,-.628,2.333],770:[.845,-.561,0],771:[.899,-.628,0],988:[.605,.085,.778],989:[.605,.085,.778],1008:[.434,.006,.667,{ic:.067}],8245:[.56,-.043,.275],8463:[.695,.013,.54],8487:[.684,.022,.722],8498:[.695,0,.556],8502:[.763,.021,.667],8503:[.764,.043,.444],8504:[.764,.043,.667],8513:[.705,.023,.639],8592:[.437,-.064,.5],8594:[.437,-.064,.5],8602:[.437,-.06,1],8603:[.437,-.06,1],8606:[.417,-.083,1],8608:[.417,-.083,1],8610:[.417,-.083,1.111],8611:[.417,-.083,1.111],8619:[.575,.041,1],8620:[.575,.041,1],8621:[.417,-.083,1.389],8622:[.437,-.06,1],8624:[.722,0,.5],8625:[.722,0,.5],8630:[.461,0,1],8631:[.46,0,1],8634:[.65,.083,.778],8635:[.65,.083,.778],8638:[.694,.194,.417],8639:[.694,.194,.417],8642:[.694,.194,.417],8643:[.694,.194,.417],8644:[.667,0,1],8646:[.667,0,1],8647:[.583,.083,1],8648:[.694,.193,.833],8649:[.583,.083,1],8650:[.694,.194,.833],8651:[.514,.014,1],8652:[.514,.014,1],8653:[.534,.035,1],8654:[.534,.037,1],8655:[.534,.035,1],8666:[.611,.111,1],8667:[.611,.111,1],8669:[.417,-.083,1],8672:[.437,-.064,1.334],8674:[.437,-.064,1.334],8705:[.846,.021,.5],8708:[.86,.166,.556],8709:[.587,0,.778],8717:[.44,0,.429],8722:[.27,-.23,.5],8724:[.766,.093,.778],8726:[.43,.023,.778],8733:[.472,-.028,.778],8736:[.694,0,.722],8737:[.714,.02,.722],8738:[.551,.051,.722],8739:[.43,.023,.222],8740:[.43,.023,.222],8741:[.431,.023,.389],8742:[.431,.024,.389],8756:[.471,.082,.667],8757:[.471,.082,.667],8764:[.365,-.132,.778],8765:[.367,-.133,.778],8769:[.467,-.032,.778],8770:[.463,-.034,.778],8774:[.652,.155,.778],8776:[.481,-.05,.778],8778:[.579,.039,.778],8782:[.492,-.008,.778],8783:[.492,-.133,.778],8785:[.609,.108,.778],8786:[.601,.101,.778],8787:[.601,.102,.778],8790:[.367,-.133,.778],8791:[.721,-.133,.778],8796:[.859,-.133,.778],8806:[.753,.175,.778],8807:[.753,.175,.778],8808:[.752,.284,.778],8809:[.752,.284,.778],8812:[.75,.25,.5],8814:[.708,.209,.778],8815:[.708,.209,.778],8816:[.919,.421,.778],8817:[.919,.421,.778],8818:[.732,.228,.778],8819:[.732,.228,.778],8822:[.681,.253,.778],8823:[.681,.253,.778],8828:[.58,.153,.778],8829:[.58,.154,.778],8830:[.732,.228,.778],8831:[.732,.228,.778],8832:[.705,.208,.778],8833:[.705,.208,.778],8840:[.828,.33,.778],8841:[.828,.33,.778],8842:[.634,.255,.778],8843:[.634,.254,.778],8847:[.539,.041,.778],8848:[.539,.041,.778],8858:[.582,.082,.778],8859:[.582,.082,.778],8861:[.582,.082,.778],8862:[.689,0,.778],8863:[.689,0,.778],8864:[.689,0,.778],8865:[.689,0,.778],8872:[.694,0,.611],8873:[.694,0,.722],8874:[.694,0,.889],8876:[.695,0,.611],8877:[.695,0,.611],8878:[.695,0,.722],8879:[.695,0,.722],8882:[.539,.041,.778],8883:[.539,.041,.778],8884:[.636,.138,.778],8885:[.636,.138,.778],8888:[.408,-.092,1.111],8890:[.431,.212,.556],8891:[.716,0,.611],8892:[.716,0,.611],8901:[.189,0,.278],8903:[.545,.044,.778],8905:[.492,-.008,.778],8906:[.492,-.008,.778],8907:[.694,.022,.778],8908:[.694,.02
2,.778],8909:[.464,-.036,.778],8910:[.578,.021,.76],8911:[.578,.022,.76],8912:[.54,.04,.778],8913:[.54,.04,.778],8914:[.598,.022,.667],8915:[.598,.022,.667],8916:[.736,.022,.667],8918:[.541,.041,.778],8919:[.541,.041,.778],8920:[.568,.067,1.333],8921:[.568,.067,1.333],8922:[.886,.386,.778],8923:[.886,.386,.778],8926:[.734,0,.778],8927:[.734,0,.778],8928:[.801,.303,.778],8929:[.801,.303,.778],8934:[.73,.359,.778],8935:[.73,.359,.778],8936:[.73,.359,.778],8937:[.73,.359,.778],8938:[.706,.208,.778],8939:[.706,.208,.778],8940:[.802,.303,.778],8941:[.801,.303,.778],8994:[.378,-.122,.778],8995:[.378,-.143,.778],9416:[.709,.175,.902],9484:[.694,-.306,.5],9488:[.694,-.306,.5],9492:[.366,.022,.5],9496:[.366,.022,.5],9585:[.694,.195,.889],9586:[.694,.195,.889],9632:[.689,0,.778],9633:[.689,0,.778],9650:[.575,.02,.722],9651:[.575,.02,.722],9654:[.539,.041,.778],9660:[.576,.019,.722],9661:[.576,.019,.722],9664:[.539,.041,.778],9674:[.716,.132,.667],9733:[.694,.111,.944],10003:[.706,.034,.833],10016:[.716,.022,.833],10731:[.716,.132,.667],10846:[.813,.097,.611],10877:[.636,.138,.778],10878:[.636,.138,.778],10885:[.762,.29,.778],10886:[.762,.29,.778],10887:[.801,.303,.778],10888:[.801,.303,.778],10889:[.761,.387,.778],10890:[.761,.387,.778],10891:[1.003,.463,.778],10892:[1.003,.463,.778],10901:[.636,.138,.778],10902:[.636,.138,.778],10933:[.752,.286,.778],10934:[.752,.286,.778],10935:[.761,.294,.778],10936:[.761,.294,.778],10937:[.761,.337,.778],10938:[.761,.337,.778],10949:[.753,.215,.778],10950:[.753,.215,.778],10955:[.752,.332,.778],10956:[.752,.333,.778],57350:[.43,.023,.222],57351:[.431,.024,.389],57352:[.605,.085,.778],57353:[.434,.006,.667,{ic:.067}],57356:[.752,.284,.778],57357:[.752,.284,.778],57358:[.919,.421,.778],57359:[.801,.303,.778],57360:[.801,.303,.778],57361:[.919,.421,.778],57366:[.828,.33,.778],57367:[.752,.332,.778],57368:[.828,.33,.778],57369:[.752,.333,.778],57370:[.634,.255,.778],57371:[.634,.254,.778]}},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(23);e.HDW1=[.75,.25,.875],e.HDW2=[.583,.082,1],e.HDW3=[.583,.082,.5],e.VSIZES=[1,1.2,1.8,2.4,3];var 
i={c:47,dir:n.V,sizes:e.VSIZES},o={c:175,dir:n.H,sizes:[.59],stretch:[0,175],HDW:[.59,-.544,.5]},a={c:710,dir:n.H,sizes:[.517,.817,1.335,1.777,1.909]},s={c:732,dir:n.H,sizes:[.583,.805,1.33,1.773,1.887]},c={c:8211,dir:n.H,sizes:[.5],stretch:[0,8211],HDW:[.285,-.248,.5]},l={c:8592,dir:n.H,sizes:[1],stretch:[8592,8722],HDW:e.HDW2},u={c:8594,dir:n.H,sizes:[1],stretch:[0,8722,8594],HDW:e.HDW2},h={c:8596,dir:n.H,sizes:[1],stretch:[8592,8722,8594],HDW:e.HDW2},f={c:8612,dir:n.H,stretch:[8592,8722,8739],HDW:e.HDW3,min:1.278},p={c:8614,dir:n.H,sizes:[1],stretch:[8739,8722,8594],HDW:e.HDW2},d={c:8656,dir:n.H,sizes:[1],stretch:[8656,61],HDW:e.HDW2},m={c:8658,dir:n.H,sizes:[1],stretch:[0,61,8658],HDW:e.HDW2},y={c:8660,dir:n.H,sizes:[1],stretch:[8656,61,8658],HDW:e.HDW2},v={c:8722,dir:n.H,sizes:[.778],stretch:[0,8722],HDW:[.583,.082,.778]},b={c:8739,dir:n.V,sizes:[1],stretch:[0,8739],HDW:[.75,.249,.278]},g={c:9180,dir:n.H,sizes:[.778,1],schar:[8994,8994],stretch:[57680,57684,57681],HDW:[.32,.2,.5]},M={c:9181,dir:n.H,sizes:[.778,1],schar:[8995,8995],stretch:[57682,57684,57683],HDW:[.32,.2,.5]},O={c:9182,dir:n.H,stretch:[57680,57684,57681,57685],HDW:[.32,.2,.5],min:1.8},x={c:9183,dir:n.H,stretch:[57682,57684,57683,57686],HDW:[.32,.2,.5],min:1.8},S={c:10216,dir:n.V,sizes:e.VSIZES},E={c:10217,dir:n.V,sizes:e.VSIZES},C={c:10502,dir:n.H,stretch:[8656,61,8739],HDW:e.HDW3,min:1.278},_={c:10503,dir:n.H,stretch:[8872,61,8658],HDW:e.HDW3,min:1.278};e.delimiters={40:{dir:n.V,sizes:e.VSIZES,stretch:[9115,9116,9117],HDW:[.75,.25,.875]},41:{dir:n.V,sizes:e.VSIZES,stretch:[9118,9119,9120],HDW:[.75,.25,.875]},45:v,47:i,61:{dir:n.H,sizes:[.767],stretch:[0,61],HDW:[.583,.082,.778]},91:{dir:n.V,sizes:e.VSIZES,stretch:[9121,9122,9123],HDW:e.HDW1},92:{dir:n.V,sizes:e.VSIZES},93:{dir:n.V,sizes:e.VSIZES,stretch:[9124,9125,9126],HDW:e.HDW1},94:a,95:c,123:{dir:n.V,sizes:e.VSIZES,stretch:[9127,9130,9129,9128],HDW:[.75,.25,.889]},124:{dir:n.V,sizes:[1],stretch:[0,8739],HDW:[.75,.249,.278]},125:{dir:n.V,sizes:e.VSIZES,stretch:[9131,9130,9133,9132],HDW:[.75,.25,.889]},126:s,175:o,710:a,713:o,732:s,770:a,771:s,818:c,8211:c,8212:c,8213:c,8214:{dir:n.V,sizes:[.602,1],schar:[0,8741],stretch:[0,8741],HDW:[.75,.25,.5]},8215:c,8254:o,8407:u,8592:l,8593:{dir:n.V,sizes:[.888],stretch:[8593,9168],HDW:[.694,.193,.667]},8594:u,8595:{dir:n.V,sizes:[.888],stretch:[0,9168,8595],HDW:[.694,.194,.667]},8596:h,8597:{dir:n.V,sizes:[1.044],stretch:[8593,9168,8595],HDW:[.772,.272,.667]},8606:{dir:n.H,sizes:[1],stretch:[8606,8722],HDW:e.HDW2},8608:{dir:n.H,sizes:[1],stretch:[0,8722,8608],HDW:e.HDW2},8612:f,8613:{dir:n.V,stretch:[8593,9168,8869],HDW:e.HDW1,min:1.555},8614:p,8615:{dir:n.V,stretch:[8868,9168,8595],HDW:e.HDW1,min:1.555},8624:{dir:n.V,sizes:[.722],stretch:[8624,9168],HDW:[.722,0,.667]},8625:{dir:n.V,sizes:[.722],stretch:[8625,9168],HDW:[.722,0,.667]},8636:{dir:n.H,sizes:[1],stretch:[8636,8722],HDW:e.HDW2},8637:{dir:n.H,sizes:[1],stretch:[8637,8722],HDW:e.HDW2},8638:{dir:n.V,sizes:[.888],stretch:[8638,9168],HDW:[.694,.194,.667]},8639:{dir:n.V,sizes:[.888],stretch:[8639,9168],HDW:[.694,.194,.667]},8640:{dir:n.H,sizes:[1],stretch:[0,8722,8640],HDW:e.HDW2},8641:{dir:n.H,sizes:[1],stretch:[0,8722,8641],HDW:e.HDW2},8642:{dir:n.V,sizes:[.888],stretch:[0,9168,8642],HDW:[.694,.194,.667]},8643:{dir:n.V,sizes:[.888],stretch:[0,9168,8643],HDW:[.694,.194,.667]},8656:d,8657:{dir:n.V,sizes:[.888],stretch:[8657,8214],HDW:[.694,.194,.778]},8658:m,8659:{dir:n.V,sizes:[.888],stretch:[0,8214,8659],HDW:[.694,.194,.778]},8660:y,8661:{dir:n.V,sizes:[1.044],stretch:[
8657,8214,8659],HDW:[.772,.272,.778]},8666:{dir:n.H,sizes:[1],stretch:[8666,8801],HDW:[.464,-.036,1]},8667:{dir:n.H,sizes:[1],stretch:[0,8801,8667],HDW:[.464,-.036,1]},8722:v,8725:i,8730:{dir:n.V,sizes:e.VSIZES,stretch:[57345,57344,9143],HDW:[.8,.2,1.056]},8739:b,8741:{dir:n.V,sizes:[1],stretch:[0,8741],HDW:[.75,.25,.5]},8968:{dir:n.V,sizes:e.VSIZES,stretch:[9121,9122],HDW:e.HDW1},8969:{dir:n.V,sizes:e.VSIZES,stretch:[9124,9125],HDW:e.HDW1},8970:{dir:n.V,sizes:e.VSIZES,stretch:[0,9122,9123],HDW:e.HDW1},8971:{dir:n.V,sizes:e.VSIZES,stretch:[0,9125,9126],HDW:e.HDW1},8978:g,8994:g,8995:M,9001:S,9002:E,9130:{dir:n.V,sizes:[.32],stretch:[9130,9130,9130],HDW:[.29,.015,.889]},9135:c,9136:{dir:n.V,sizes:[.989],stretch:[9127,9130,9133],HDW:[.744,.244,.889]},9137:{dir:n.V,sizes:[.989],stretch:[9131,9130,9129],HDW:[.744,.244,.889]},9140:{dir:n.H,stretch:[9484,8722,9488],HDW:e.HDW3,min:1},9141:{dir:n.H,stretch:[9492,8722,9496],HDW:e.HDW3,min:1},9168:{dir:n.V,sizes:[.602,1],schar:[0,8739],stretch:[0,8739],HDW:[.602,0,.278]},9180:g,9181:M,9182:O,9183:x,9184:{dir:n.H,stretch:[714,713,715],HDW:[.59,-.544,.5],min:1},9185:{dir:n.H,stretch:[715,713,714],HDW:[.59,-.544,.5],min:1},9472:c,10072:b,10216:S,10217:E,10222:{dir:n.V,sizes:[.989],stretch:[9127,9130,9129],HDW:[.744,.244,.889]},10223:{dir:n.V,sizes:[.989],stretch:[9131,9130,9133],HDW:[.744,.244,.889]},10229:l,10230:u,10231:h,10232:d,10233:m,10234:y,10235:f,10236:p,10237:C,10238:_,10502:C,10503:_,10574:{dir:n.H,stretch:[8636,8722,8640],HDW:e.HDW3,min:2},10575:{dir:n.V,stretch:[8638,9168,8642],HDW:e.HDW1,min:1.776},10576:{dir:n.H,stretch:[8637,8722,8641],HDW:e.HDW3,min:2},10577:{dir:n.V,stretch:[8639,9168,8643],HDW:e.HDW1,min:.5},10586:{dir:n.H,stretch:[8636,8722,8739],HDW:e.HDW3,min:1.278},10587:{dir:n.H,stretch:[8739,8722,8640],HDW:e.HDW3,min:1.278},10588:{dir:n.V,stretch:[8638,9168,8869],HDW:e.HDW1,min:1.556},10589:{dir:n.V,stretch:[8868,9168,8642],HDW:e.HDW1,min:1.556},10590:{dir:n.H,stretch:[8637,8722,8739],HDW:e.HDW3,min:1.278},10591:{dir:n.H,stretch:[8739,8722,8641],HDW:e.HDW3,min:1.278},10592:{dir:n.V,stretch:[8639,9168,8869],HDW:e.HDW1,min:1.776},10593:{dir:n.V,stretch:[8868,9168,8643],HDW:e.HDW1,min:1.776},12296:S,12297:E,65079:O,65080:x}},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),l=this&&this.__values||function(t){var e="function"==typeof Symbol&&Symbol.iterator,r=e&&t[e],n=0;if(r)return r.call(t);if(t&&"number"==typeof t.length)return{next:function(){return t&&n>=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")},u=this&&this.__read||function(t,e){var r="function"==typeof Symbol&&t[Symbol.iterator];if(!r)return t;var n,i,o=r.call(t),a=[];try{for(;(void 0===e||0=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var o=r(20),l=r(13),n=r(3),i=r(225),a=r(227),s=r(228),c="undefined"!=typeof window&&window.navigator&&"Mac"===window.navigator.platform.substr(0,3),h=(Object.defineProperty(f.prototype,"isLoading",{get:function(){return 0/g,">")},f.prototype.toMML=function(t){return 
this.MmlVisitor.visitTree(t.root,t,{texHints:this.settings.texHints,semantics:this.settings.semantics&&"MathML"!==t.inputJax.name})},f.prototype.zoom=function(t,e,r){t&&!this.isZoomEvent(t,e)||(this.menu.mathItem=r,t&&this.menu.post(t),this.zoomBox.post())},f.prototype.isZoomEvent=function(t,e){return this.settings.zoom===e&&(!this.settings.alt||t.altKey)&&(!this.settings.ctrl||t.ctrlKey)&&(!this.settings.cmd||t.metaKey)&&(!this.settings.shift||t.shiftKey)},f.prototype.rerender=function(t){void 0===t&&(t=l.STATE.TYPESET),this.rerenderStart=Math.min(t,this.rerenderStart),f.loading||(this.document.rerender(this.rerenderStart),this.rerenderStart=l.STATE.LAST)},f.prototype.copyMathML=function(){this.copyToClipboard(this.toMML(this.menu.mathItem))},f.prototype.copyOriginal=function(){this.copyToClipboard(this.menu.mathItem.math)},f.prototype.copyAnnotation=function(){this.copyToClipboard(this.menu.annotation)},f.prototype.copyToClipboard=function(t){var e=document.createElement("textarea");e.value=t,e.setAttribute("readonly",""),e.style.cssText="height: 1px; width: 1px; padding: 1px; position: absolute; left: -10px",document.body.appendChild(e),e.select();try{document.execCommand("copy")}catch(t){alert("Can't copy to clipboard: "+t.message)}document.body.removeChild(e)},f.prototype.addMenu=function(e){var r=this,t=e.typesetRoot;t.addEventListener("contextmenu",function(){return r.menu.mathItem=e},!0),t.addEventListener("keydown",function(){return r.menu.mathItem=e},!0),t.addEventListener("click",function(t){return r.zoom(t,"Click",e)},!0),t.addEventListener("dblclick",function(t){return r.zoom(t,"DoubleClick",e)},!0),this.menu.getStore().insert(t)},f.prototype.clear=function(){this.menu.getStore().clear()},f.prototype.variable=function(e,r){var n=this;return{name:e,getter:function(){return n.settings[e]},setter:function(t){n.settings[e]=t,r&&r(t),n.saveUserSettings()}}},f.prototype.a11yVar=function(r){var n=this;return{name:r,getter:function(){return n.getA11y(r)},setter:function(t){n.settings[r]=t;var e={};e[r]=t,n.setA11y(e),n.saveUserSettings()}}},f.prototype.submenu=function(t,e,r,n){var i,o;void 0===r&&(r=[]),void 0===n&&(n=!1);var a=[];try{for(var s=u(r),c=s.next();!c.done;c=s.next()){var l=c.value;Array.isArray(l)?a=a.concat(l):a.push(l)}}catch(t){i={error:t}}finally{try{c&&!c.done&&(o=s.return)&&o.call(s)}finally{if(i)throw i.error}}return{type:"submenu",id:t,content:e,menu:{items:a},disabled:0===a.length||n}},f.prototype.command=function(t,e,r,n){return void 0===n&&(n={}),Object.assign({type:"command",id:t,content:e,action:r},n)},f.prototype.checkbox=function(t,e,r,n){return void 0===n&&(n={}),Object.assign({type:"checkbox",id:t,content:e,variable:r},n)},f.prototype.radioGroup=function(e,t){var r=this;return t.map(function(t){return r.radio(t[0],t[1]||t[0],e)})},f.prototype.radio=function(t,e,r,n){return void 0===n&&(n={}),Object.assign({type:"radio",id:t,content:e,variable:r},n)},f.prototype.label=function(t,e){return{type:"label",id:t,content:e}},f.prototype.rule=function(){return{type:"rule"}},f.MENU_STORAGE="MathJax-Menu-Settings",f.OPTIONS={settings:{texHints:!0,semantics:!1,zoom:"NoZoom",zscale:"200%",renderer:"CHTML",alt:!1,cmd:!1,ctrl:!1,shift:!1,scale:1,autocollapse:!1,collapsible:!1,inTabOrder:!0,explorer:!1},jax:{CHTML:null,SVG:null},annotationTypes:n.expandable({TeX:["TeX","LaTeX","application/x-tex"],StarMath:["StarMath 5.0"],Maple:["Maple"],ContentMathML:["MathML-Content","application/mathml-content+xml"],OpenMath:["OpenMath"]})},f.loading=0,f.loadingPromises=new 
Map,f._loadingPromise=null,f._loadingOK=null,f._loadingFailed=null,f);function f(t,e){var r=this;void 0===e&&(e={}),this.settings=null,this.defaultSettings=null,this.menu=null,this.MmlVisitor=new a.MmlVisitor,this.jax={CHTML:null,SVG:null},this.rerenderStart=l.STATE.LAST,this.about=new ContextMenu.Info('MathJax v'+o.mathjax.version,function(){var t=[];return t.push("Input Jax: "+r.document.inputJax.map(function(t){return t.name}).join(", ")),t.push("Output Jax: "+r.document.outputJax.name),t.push("Document Type: "+r.document.kind),t.join("
")},'www.mathjax.org'),this.help=new ContextMenu.Info("MathJax Help",function(){return["

MathJax is a JavaScript library that allows page"," authors to include mathematics within their web pages."," As a reader, you don't need to do anything to make that happen.

","

Browsers: MathJax works with all modern browsers including"," Edge, Firefox, Chrome, Safari, Opera, and most mobile browsers.

","

Math Menu: MathJax adds a contextual menu to equations."," Right-click or CTRL-click on any mathematics to access the menu.

",'
',"

Show Math As: These options allow you to view the formula's"," source markup (as MathML or in its original format).

","

Copy to Clipboard: These options copy the formula's source markup,"," as MathML or in its original format, to the clipboard"," (in browsers that support that).

","

Math Settings: These give you control over features of MathJax,"," such the size of the mathematics, and the mechanism used"," to display equations.

","

Accessibility: MathJax can work with screen"," readers to make mathematics accessible to the visually impaired."," Turn on the explorer to enable generation of speech strings"," and the ability to investigate expressions interactively.

","

Language: This menu lets you select the language used by MathJax"," for its menus and warning messages. (Not yet implemented in version 3.)

","
","

Math Zoom: If you are having difficulty reading an"," equation, MathJax can enlarge it to help you see it better, or"," you can scall all the math on the page to make it larger."," Turn these features on in the Math Settings menu.

","

Preferences: MathJax uses your browser's localStorage database"," to save the preferences set via this menu locally in your browser. These"," are not used to track you, and are not transferred or used remotely by"," MathJax in any way.

"].join("\n")},'www.mathjax.org'),this.mathmlCode=new s.SelectableInfo("MathJax MathML Expression",function(){if(!r.menu.mathItem)return"";var t=r.toMML(r.menu.mathItem);return"
"+r.formatSource(t)+"
"},""),this.originalText=new s.SelectableInfo("MathJax Original Source",function(){if(!r.menu.mathItem)return"";var t=r.menu.mathItem.math;return'
'+r.formatSource(t)+"
"},""),this.annotationText=new s.SelectableInfo("MathJax Annotation Text",function(){if(!r.menu.mathItem)return"";var t=r.menu.annotation;return'
'+r.formatSource(t)+"
"},""),this.zoomBox=new ContextMenu.Info("MathJax Zoomed Expression",function(){if(!r.menu.mathItem)return"";var t=r.menu.mathItem.typesetRoot.cloneNode(!0);return t.style.margin="0",'
'+t.outerHTML+"
"},""),this.document=t,this.options=n.userOptions(n.defaultOptions({},this.constructor.OPTIONS),e),this.initSettings(),this.mergeUserSettings(),this.initMenu()}e.Menu=h},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)});Object.defineProperty(e,"__esModule",{value:!0});var o,a=r(96),s=r(3),c=(o=a.SerializedMmlVisitor,i(l,o),l.prototype.visitTree=function(t,e,r){return void 0===e&&(e=null),void 0===r&&(r={}),this.mathItem=e,s.userOptions(this.options,r),this.visitNode(t,"")},l.prototype.visitTeXAtomNode=function(t,e){return this.options.texHints?o.prototype.visitTeXAtomNode.call(this,t,e):t.childNodes[0]&&1===t.childNodes[0].childNodes.length?this.visitNode(t.childNodes[0],e):e+"\n"+this.childNodeMml(t,e+" ","\n")+e+""},l.prototype.visitMathNode=function(t,e){if(!this.options.semantics||"TeX"!==this.mathItem.inputJax.name)return o.prototype.visitDefault.call(this,t,e);var r=t.childNodes.length&&1\n"+e+" \n"+(r?e+" \n":"")+this.childNodeMml(t,e+(r?" ":" "),"\n")+(r?e+" \n":"")+e+' '+this.mathItem.math+"\n"+e+" \n"+e+""},l);function l(){var t=null!==o&&o.apply(this,arguments)||this;return t.options={texHints:!0,semantics:!1},t.mathItem=null,t}e.MmlVisitor=c},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)});Object.defineProperty(e,"__esModule",{value:!0});var o,a=(o=ContextMenu.Info,i(s,o),s.prototype.addEvents=function(t){var e=this;t.addEventListener("keypress",function(t){"a"===t.key&&(t.ctrlKey||t.metaKey)&&(e.selectAll(),e.stop(t))})},s.prototype.selectAll=function(){document.getSelection().selectAllChildren(this.getHtml().querySelector("pre"))},s.prototype.copyToClipboard=function(){this.selectAll();try{document.execCommand("copy")}catch(t){alert("Can't copy to clipboard: "+t.message)}document.getSelection().removeAllRanges()},s.prototype.generateHtml=function(){var e=this;o.prototype.generateHtml.call(this);var t=this.getHtml().querySelector("span."+ContextMenu.HtmlClasses.INFOSIGNATURE).appendChild(document.createElement("input"));t.type="button",t.value="Copy to Clipboard",t.addEventListener("click",function(t){return e.copyToClipboard()})},s);function s(){return null!==o&&o.apply(this,arguments)||this}e.SelectableInfo=a},function(t,e,r){"use strict";var n,o=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),a=this&&this.__assign||function(){return(a=Object.assign||function(t){for(var e,r=1,n=arguments.length;r=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var l=r(20),u=r(13),h=r(3),f=r(226);function p(t){return 
o(e,n=t),e.prototype.addMenu=function(t){this.state()=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(S,"__esModule",{value:!0});var t,c,r,o,l,n=E(5),a=E(24);function u(t){return r.visitTree(t,c.document)}function h(){r=new S.MathJax._.core.MmlTree.SerializedMmlVisitor.SerializedMmlVisitor,o=S.MathJax._.mathjax.mathjax,c.input=v(),c.output=b(),c.adaptor=g(),c.handler&&o.handlers.unregister(c.handler),c.handler=M(),c.handler&&(o.handlers.register(c.handler),c.document=O())}function f(){var e,t;c.input&&c.output&&p();var r=c.output?c.output.name.toLowerCase():"";try{for(var n=s(c.input),i=n.next();!i.done;i=n.next()){var o=i.value,a=o.name.toLowerCase();m(a,o),y(a,o),c.output&&d(a,r,o)}}catch(t){e={error:t}}finally{try{i&&!i.done&&(t=n.return)&&t.call(n)}finally{if(e)throw e.error}}}function p(){S.MathJax.typeset=function(t){void 0===t&&(t=null),c.document.options.elements=t,c.document.render()},S.MathJax.typesetPromise=function(t){return void 0===t&&(t=null),c.document.options.elements=t,o.handleRetriesFor(function(){c.document.render()})},S.MathJax.typesetClear=function(){return c.document.clear()}}function d(t,e,r){var n=t+"2"+e;S.MathJax[n]=function(t,e){return void 0===e&&(e={}),e.format=r.name,c.document.convert(t,e)},S.MathJax[n+"Promise"]=function(t,e){return void 0===e&&(e={}),e.format=r.name,o.handleRetriesFor(function(){return c.document.convert(t,e)})},S.MathJax[e+"Stylesheet"]=function(){return c.output.styleSheet(c.document)},"getMetricsFor"in c.output&&(S.MathJax.getMetricsFor=function(t,e){return c.output.getMetricsFor(t,e)})}function m(t,r){var n=S.MathJax._.core.MathItem.STATE;S.MathJax[t+"2mml"]=function(t,e){return void 0===e&&(e={}),e.end=n.CONVERT,e.format=r.name,u(c.document.convert(t,e))},S.MathJax[t+"2mmlPromise"]=function(t,e){return void 0===e&&(e={}),e.end=n.CONVERT,e.format=r.name,o.handleRetriesFor(function(){return u(c.document.convert(t,e))})}}function y(t,e){"tex"===t&&(S.MathJax.texReset=function(t){return void 0===t&&(t=0),e.parseOptions.tags.reset(t)})}function v(){var e,t,r=[];try{for(var n=s(S.CONFIG.input),i=n.next();!i.done;i=n.next()){var o=i.value,a=c.constructors[o];if(!a)throw Error('Input Jax "'+o+'" is not defined (has it been loaded?)');r.push(new a(S.MathJax.config[o]))}}catch(t){e={error:t}}finally{try{i&&!i.done&&(t=n.return)&&t.call(n)}finally{if(e)throw e.error}}return r}function b(){var t=S.CONFIG.output;if(!t)return null;var e=c.constructors[t];if(!e)throw Error('Output Jax "'+t+'" is not defined (has it been loaded?)');return new e(S.MathJax.config[t])}function g(){var t=S.CONFIG.adaptor;if(!t||"none"===t)return null;var e=c.constructors[t];if(!e)throw Error('DOMAdaptor "'+t+'" is not defined (has it been loaded?)');return e(S.MathJax.config[t])}function M(){var e,t,r=S.CONFIG.handler;if(!r||"none"===r||!c.adaptor)return null;var n=c.constructors[r];if(!n)throw Error('Handler "'+r+'" is not defined (has it been loaded?)');var i=new n(c.adaptor,5);try{for(var o=s(l),a=o.next();!a.done;a=o.next()){i=a.value.item(i)}}catch(t){e={error:t}}finally{try{a&&!a.done&&(t=o.return)&&t.call(o)}finally{if(e)throw e.error}}return i}function O(t){return void 0===t&&(t=null),o.document(t||S.CONFIG.document,e(e({},S.MathJax.config.options),{InputJax:c.input,OutputJax:c.output}))}c=t=S.Startup||(S.Startup={}),l=new a.PrioritizedList,c.constructors={},c.input=[],c.output=null,c.handler=null,c.adaptor=null,c.elements=null,c.document=null,c.promise=new 
Promise(function(t,e){var r=i.document;if(r&&r.readyState&&"complete"!==r.readyState&&"interactive"!==r.readyState){var n=function(){return t()};r.defaultView.addEventListener("load",n,!0),r.defaultView.addEventListener("DOMContentLoaded",n,!0)}else t()}),c.toMML=u,c.registerConstructor=function(t,e){c.constructors[t]=e},c.useHandler=function(t,e){void 0===e&&(e=!1),S.CONFIG.handler&&!e||(S.CONFIG.handler=t)},c.useAdaptor=function(t,e){void 0===e&&(e=!1),S.CONFIG.adaptor&&!e||(S.CONFIG.adaptor=t)},c.useInput=function(t,e){void 0===e&&(e=!1),x&&!e||S.CONFIG.input.push(t)},c.useOutput=function(t,e){void 0===e&&(e=!1),S.CONFIG.output&&!e||(S.CONFIG.output=t)},c.extendHandler=function(t,e){void 0===e&&(e=10),l.add(t,e)},c.defaultReady=function(){h(),f(),c.promise=c.promise.then(function(){return S.CONFIG.pageReady()})},c.defaultPageReady=function(){return S.CONFIG.typeset&&S.MathJax.typesetPromise?S.MathJax.typesetPromise():null},c.getComponents=h,c.makeMethods=f,c.makeTypesetMethods=p,c.makeOutputMethods=d,c.makeMmlMethods=m,c.makeResetMethod=y,c.getInputJax=v,c.getOutputJax=b,c.getAdaptor=g,c.getHandler=M,c.getDocument=O,S.MathJax=n.MathJax,void 0===S.MathJax._.startup&&(n.combineDefaults(S.MathJax.config,"startup",{input:[],output:"",handler:null,adaptor:null,document:"undefined"==typeof document?"":document,elements:null,typeset:!0,ready:t.defaultReady.bind(t),pageReady:t.defaultPageReady.bind(t)}),n.combineWithMathJax({startup:t,options:{}})),S.CONFIG=S.MathJax.config.startup;var x=0!==S.CONFIG.input.length}).call(this,E(28))},function(t,e,r){"use strict";r(17).Loader.preLoad("loader","startup","core","input/tex","input/mml","output/chtml","output/chtml/fonts/tex.js","ui/menu")},function(t,e,r){"use strict";r(234);var n=r(70),i=r(81);MathJax.startup&&(MathJax.startup.registerConstructor("HTMLHandler",n.HTMLHandler),MathJax.startup.registerConstructor("browserAdaptor",i.browserAdaptor),MathJax.startup.useHandler("HTMLHandler"),MathJax.startup.useAdaptor("browserAdaptor")),MathJax.loader&&(MathJax._.mathjax.mathjax.asyncLoad=function(t){return MathJax.loader.load(t)})},function(t,e,r){"use strict";var n=r(5),i=Ct(n),o=Ct(r(79)),a=Ct(r(81)),s=Ct(r(80)),c=Ct(r(40)),l=Ct(r(82)),u=Ct(r(94)),h=Ct(r(29)),f=Ct(r(41)),p=Ct(r(13)),d=Ct(r(43)),m=Ct(r(19)),y=Ct(r(85)),v=Ct(r(235)),b=Ct(r(44)),g=Ct(r(0)),M=Ct(r(67)),O=Ct(r(59)),x=Ct(r(90)),S=Ct(r(91)),E=Ct(r(46)),C=Ct(r(92)),_=Ct(r(58)),T=Ct(r(88)),w=Ct(r(57)),A=Ct(r(53)),k=Ct(r(65)),I=Ct(r(47)),L=Ct(r(61)),N=Ct(r(48)),P=Ct(r(26)),B=Ct(r(56)),R=Ct(r(89)),j=Ct(r(55)),H=Ct(r(52)),D=Ct(r(51)),X=Ct(r(50)),F=Ct(r(54)),W=Ct(r(87)),J=Ct(r(31)),q=Ct(r(62)),V=Ct(r(64)),U=Ct(r(49)),z=Ct(r(63)),G=Ct(r(60)),K=Ct(r(66)),Z=Ct(r(68)),Y=Ct(r(86)),$=Ct(r(96)),Q=Ct(r(42)),tt=Ct(r(30)),et=Ct(r(45)),rt=Ct(r(84)),nt=Ct(r(95)),it=Ct(r(97)),ot=Ct(r(98)),at=Ct(r(236)),st=Ct(r(99)),ct=Ct(r(102)),lt=Ct(r(70)),ut=Ct(r(100)),ht=Ct(r(101)),ft=Ct(r(20)),pt=Ct(r(103)),dt=Ct(r(93)),mt=Ct(r(12)),yt=Ct(r(25)),vt=Ct(r(83)),bt=Ct(r(3)),gt=Ct(r(24)),Mt=Ct(r(69)),Ot=Ct(r(71)),xt=Ct(r(14)),St=Ct(r(104)),Et=Ct(r(10));function Ct(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var r in t)Object.prototype.hasOwnProperty.call(t,r)&&(e[r]=t[r]);return 
e.default=t,e}(0,n.combineWithMathJax)({_:{adaptors:{HTMLAdaptor:o,browserAdaptor:a},components:{global:i},core:{DOMAdaptor:s,FindMath:c,Handler:l,HandlerList:u,InputJax:h,MathDocument:f,MathItem:p,MathList:d,MmlTree:{Attributes:m,MML:y,MathMLVisitor:v,MmlFactory:b,MmlNode:g,MmlNodes:{TeXAtom:M,maction:O,maligngroup:x,malignmark:S,math:E,mathchoice:C,menclose:_,merror:T,mfenced:w,mfrac:A,mglyph:k,mi:I,mmultiscripts:L,mn:N,mo:P,mpadded:B,mphantom:R,mroot:j,mrow:H,ms:D,mspace:X,msqrt:F,mstyle:W,msubsup:J,mtable:q,mtd:V,mtext:U,mtr:z,munderover:G,semantics:K},MmlVisitor:Z,OperatorDictionary:Y,SerializedMmlVisitor:$},OutputJax:Q,Tree:{Factory:tt,Node:et,NodeFactory:rt,Visitor:nt,Wrapper:it,WrapperFactory:ot}},handlers:{html_ts:at,html:{HTMLDocument:st,HTMLDomStrings:ct,HTMLHandler:lt,HTMLMathItem:ut,HTMLMathList:ht}},mathjax:ft,util:{AsyncLoad:pt,BitField:dt,Entities:mt,FunctionList:yt,LinkedList:vt,Options:bt,PrioritizedList:gt,Retries:Mt,Styles:Ot,lengths:xt,numeric:St,string:Et}}})},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),l=this&&this.__values||function(t){var e="function"==typeof Symbol&&Symbol.iterator,r=e&&t[e],n=0;if(r)return r.call(t);if(t&&"number"==typeof t.length)return{next:function(){return t&&n>=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var o,a=r(68),s=(o=a.MmlVisitor,i(c,o),c.prototype.visitTree=function(t,e){var r=(this.document=e).createElement("top");return this.visitNode(t,r),this.document=null,r.firstChild},c.prototype.visitTextNode=function(t,e){e.appendChild(this.document.createTextNode(t.getText()))},c.prototype.visitXMLNode=function(t,e){e.appendChild(t.getXML().cloneNode(!0))},c.prototype.visitInferredMrowNode=function(t,e){var r,n;try{for(var i=l(t.childNodes),o=i.next();!o.done;o=i.next()){var a=o.value;this.visitNode(a,e)}}catch(t){r={error:t}}finally{try{o&&!o.done&&(n=i.return)&&n.call(i)}finally{if(r)throw r.error}}},c.prototype.visitDefault=function(t,e){var r,n,i=this.document.createElement(t.kind);this.addAttributes(t,i);try{for(var o=l(t.childNodes),a=o.next();!a.done;a=o.next()){var s=a.value;this.visitNode(s,i)}}catch(t){r={error:t}}finally{try{a&&!a.done&&(n=o.return)&&n.call(o)}finally{if(r)throw r.error}}e.appendChild(i)},c.prototype.addAttributes=function(t,e){var r,n,i=t.attributes,o=i.getExplicitNames();try{for(var a=l(o),s=a.next();!s.done;s=a.next()){var c=s.value;e.setAttribute(c,i.getExplicit(c).toString())}}catch(t){r={error:t}}finally{try{s&&!s.done&&(n=a.return)&&n.call(a)}finally{if(r)throw r.error}}},c);function c(){var t=null!==o&&o.apply(this,arguments)||this;return t.document=null,t}e.MathMLVisitor=s},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(20),i=r(70);e.RegisterHTMLHandler=function(t){var e=new i.HTMLHandler(t);return n.mathjax.handlers.register(e),e}},function(t,e,r){"use strict";r(238);var 
n=r(249);r(17).Loader.preLoad("input/tex-base","[tex]/ams","[tex]/newcommand","[tex]/noundefined","[tex]/require","[tex]/autoload","[tex]/configMacros"),(0,n.registerTeX)(["base","ams","newcommand","noundefined","require","autoload","configMacros"])},function(t,e,r){"use strict";var n=r(5),i=j(r(105)),o=j(r(11)),a=j(r(107)),s=j(r(106)),c=j(r(8)),l=j(r(112)),u=j(r(6)),h=j(r(33)),f=j(r(110)),p=j(r(7)),d=j(r(109)),m=j(r(32)),y=j(r(111)),v=j(r(22)),b=j(r(9)),g=j(r(27)),M=j(r(15)),O=j(r(4)),x=j(r(21)),S=j(r(242)),E=j(r(114)),C=j(r(115)),_=j(r(244)),T=j(r(113)),w=j(r(34)),A=j(r(35)),k=j(r(245)),I=j(r(246)),L=j(r(118)),N=j(r(72)),P=j(r(117)),B=j(r(248)),R=j(r(116));function j(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var r in t)Object.prototype.hasOwnProperty.call(t,r)&&(e[r]=t[r]);return e.default=t,e}(0,n.combineWithMathJax)({_:{input:{tex_ts:i,tex:{Configuration:o,FilterUtil:a,FindTeX:s,MapHandler:c,NodeFactory:l,NodeUtil:u,ParseMethods:h,ParseOptions:f,ParseUtil:p,Stack:d,StackItem:m,StackItemFactory:y,Symbol:v,SymbolMap:b,Tags:g,TexConstants:M,TexError:O,TexParser:x,ams:{AmsConfiguration:S,AmsItems:E,AmsMethods:C},autoload:{AutoloadConfiguration:_},base:{BaseConfiguration:T,BaseItems:w,BaseMethods:A},config_macros:{ConfigMacrosConfiguration:k},newcommand:{NewcommandConfiguration:I,NewcommandItems:L,NewcommandMethods:N,NewcommandUtil:P},noundefined:{NoUndefinedConfiguration:B},require:{RequireConfiguration:R}}}}})},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(9),i=r(15),o=r(35),a=r(33),s=r(0);new n.RegExpMap("letter",a.default.variable,/[a-z]/i),new n.RegExpMap("digit",a.default.digit,/[0-9.,]/),new n.RegExpMap("command",a.default.controlSequence,/^\\/),new n.MacroMap("special",{"{":"Open","}":"Close","~":"Tilde","^":"Superscript",_:"Subscript"," ":"Space","\t":"Space","\r":"Space","\n":"Space","'":"Prime","%":"Comment","&":"Entry","#":"Hash","\xa0":"Space","\u2019":"Prime"},o.default),new 
n.CharacterMap("mathchar0mi",a.default.mathchar0mi,{alpha:"\u03b1",beta:"\u03b2",gamma:"\u03b3",delta:"\u03b4",epsilon:"\u03f5",zeta:"\u03b6",eta:"\u03b7",theta:"\u03b8",iota:"\u03b9",kappa:"\u03ba",lambda:"\u03bb",mu:"\u03bc",nu:"\u03bd",xi:"\u03be",omicron:"\u03bf",pi:"\u03c0",rho:"\u03c1",sigma:"\u03c3",tau:"\u03c4",upsilon:"\u03c5",phi:"\u03d5",chi:"\u03c7",psi:"\u03c8",omega:"\u03c9",varepsilon:"\u03b5",vartheta:"\u03d1",varpi:"\u03d6",varrho:"\u03f1",varsigma:"\u03c2",varphi:"\u03c6",S:["\xa7",{mathvariant:i.TexConstant.Variant.NORMAL}],aleph:["\u2135",{mathvariant:i.TexConstant.Variant.NORMAL}],hbar:["\u210f",{variantForm:!0}],imath:"\u0131",jmath:"\u0237",ell:"\u2113",wp:["\u2118",{mathvariant:i.TexConstant.Variant.NORMAL}],Re:["\u211c",{mathvariant:i.TexConstant.Variant.NORMAL}],Im:["\u2111",{mathvariant:i.TexConstant.Variant.NORMAL}],partial:["\u2202",{mathvariant:i.TexConstant.Variant.NORMAL}],infty:["\u221e",{mathvariant:i.TexConstant.Variant.NORMAL}],prime:["\u2032",{mathvariant:i.TexConstant.Variant.NORMAL,variantForm:!0}],emptyset:["\u2205",{mathvariant:i.TexConstant.Variant.NORMAL}],nabla:["\u2207",{mathvariant:i.TexConstant.Variant.NORMAL}],top:["\u22a4",{mathvariant:i.TexConstant.Variant.NORMAL}],bot:["\u22a5",{mathvariant:i.TexConstant.Variant.NORMAL}],angle:["\u2220",{mathvariant:i.TexConstant.Variant.NORMAL}],triangle:["\u25b3",{mathvariant:i.TexConstant.Variant.NORMAL}],backslash:["\u2216",{mathvariant:i.TexConstant.Variant.NORMAL,variantForm:!0}],forall:["\u2200",{mathvariant:i.TexConstant.Variant.NORMAL}],exists:["\u2203",{mathvariant:i.TexConstant.Variant.NORMAL}],neg:["\xac",{mathvariant:i.TexConstant.Variant.NORMAL}],lnot:["\xac",{mathvariant:i.TexConstant.Variant.NORMAL}],flat:["\u266d",{mathvariant:i.TexConstant.Variant.NORMAL}],natural:["\u266e",{mathvariant:i.TexConstant.Variant.NORMAL}],sharp:["\u266f",{mathvariant:i.TexConstant.Variant.NORMAL}],clubsuit:["\u2663",{mathvariant:i.TexConstant.Variant.NORMAL}],diamondsuit:["\u2662",{mathvariant:i.TexConstant.Variant.NORMAL}],heartsuit:["\u2661",{mathvariant:i.TexConstant.Variant.NORMAL}],spadesuit:["\u2660",{mathvariant:i.TexConstant.Variant.NORMAL}]}),new 
n.CharacterMap("mathchar0mo",a.default.mathchar0mo,{surd:"\u221a",coprod:["\u2210",{texClass:s.TEXCLASS.OP,movesupsub:!0}],bigvee:["\u22c1",{texClass:s.TEXCLASS.OP,movesupsub:!0}],bigwedge:["\u22c0",{texClass:s.TEXCLASS.OP,movesupsub:!0}],biguplus:["\u2a04",{texClass:s.TEXCLASS.OP,movesupsub:!0}],bigcap:["\u22c2",{texClass:s.TEXCLASS.OP,movesupsub:!0}],bigcup:["\u22c3",{texClass:s.TEXCLASS.OP,movesupsub:!0}],int:["\u222b",{texClass:s.TEXCLASS.OP}],intop:["\u222b",{texClass:s.TEXCLASS.OP,movesupsub:!0,movablelimits:!0}],iint:["\u222c",{texClass:s.TEXCLASS.OP}],iiint:["\u222d",{texClass:s.TEXCLASS.OP}],prod:["\u220f",{texClass:s.TEXCLASS.OP,movesupsub:!0}],sum:["\u2211",{texClass:s.TEXCLASS.OP,movesupsub:!0}],bigotimes:["\u2a02",{texClass:s.TEXCLASS.OP,movesupsub:!0}],bigoplus:["\u2a01",{texClass:s.TEXCLASS.OP,movesupsub:!0}],bigodot:["\u2a00",{texClass:s.TEXCLASS.OP,movesupsub:!0}],oint:["\u222e",{texClass:s.TEXCLASS.OP}],bigsqcup:["\u2a06",{texClass:s.TEXCLASS.OP,movesupsub:!0}],smallint:["\u222b",{largeop:!1}],triangleleft:"\u25c3",triangleright:"\u25b9",bigtriangleup:"\u25b3",bigtriangledown:"\u25bd",wedge:"\u2227",land:"\u2227",vee:"\u2228",lor:"\u2228",cap:"\u2229",cup:"\u222a",ddagger:"\u2021",dagger:"\u2020",sqcap:"\u2293",sqcup:"\u2294",uplus:"\u228e",amalg:"\u2a3f",diamond:"\u22c4",bullet:"\u2219",wr:"\u2240",div:"\xf7",odot:["\u2299",{largeop:!1}],oslash:["\u2298",{largeop:!1}],otimes:["\u2297",{largeop:!1}],ominus:["\u2296",{largeop:!1}],oplus:["\u2295",{largeop:!1}],mp:"\u2213",pm:"\xb1",circ:"\u2218",bigcirc:"\u25ef",setminus:["\u2216",{variantForm:!0}],cdot:"\u22c5",ast:"\u2217",times:"\xd7",star:"\u22c6",propto:"\u221d",sqsubseteq:"\u2291",sqsupseteq:"\u2292",parallel:"\u2225",mid:"\u2223",dashv:"\u22a3",vdash:"\u22a2",leq:"\u2264",le:"\u2264",geq:"\u2265",ge:"\u2265",lt:"<",gt:">",succ:"\u227b",prec:"\u227a",approx:"\u2248",succeq:"\u2ab0",preceq:"\u2aaf",supset:"\u2283",subset:"\u2282",supseteq:"\u2287",subseteq:"\u2286",in:"\u2208",ni:"\u220b",notin:"\u2209",owns:"\u220b",gg:"\u226b",ll:"\u226a",sim:"\u223c",simeq:"\u2243",perp:"\u22a5",equiv:"\u2261",asymp:"\u224d",smile:"\u2323",frown:"\u2322",ne:"\u2260",neq:"\u2260",cong:"\u2245",doteq:"\u2250",bowtie:"\u22c8",models:"\u22a8",notChar:"\u29f8",Leftrightarrow:"\u21d4",Leftarrow:"\u21d0",Rightarrow:"\u21d2",leftrightarrow:"\u2194",leftarrow:"\u2190",gets:"\u2190",rightarrow:"\u2192",to:"\u2192",mapsto:"\u21a6",leftharpoonup:"\u21bc",leftharpoondown:"\u21bd",rightharpoonup:"\u21c0",rightharpoondown:"\u21c1",nearrow:"\u2197",searrow:"\u2198",nwarrow:"\u2196",swarrow:"\u2199",rightleftharpoons:"\u21cc",hookrightarrow:"\u21aa",hookleftarrow:"\u21a9",longleftarrow:"\u27f5",Longleftarrow:"\u27f8",longrightarrow:"\u27f6",Longrightarrow:"\u27f9",Longleftrightarrow:"\u27fa",longleftrightarrow:"\u27f7",longmapsto:"\u27fc",ldots:"\u2026",cdots:"\u22ef",vdots:"\u22ee",ddots:"\u22f1",dotsc:"\u2026",dotsb:"\u22ef",dotsm:"\u22ef",dotsi:"\u22ef",dotso:"\u2026",ldotp:[".",{texClass:s.TEXCLASS.PUNCT}],cdotp:["\u22c5",{texClass:s.TEXCLASS.PUNCT}],colon:[":",{texClass:s.TEXCLASS.PUNCT}]}),new n.CharacterMap("mathchar7",a.default.mathchar7,{Gamma:"\u0393",Delta:"\u0394",Theta:"\u0398",Lambda:"\u039b",Xi:"\u039e",Pi:"\u03a0",Sigma:"\u03a3",Upsilon:"\u03a5",Phi:"\u03a6",Psi:"\u03a8",Omega:"\u03a9",_:"_","#":"#",$:"$","%":"%","&":"&",And:"&"}),new 
n.DelimiterMap("delimiter",a.default.delimiter,{"(":"(",")":")","[":"[","]":"]","<":"\u27e8",">":"\u27e9","\\lt":"\u27e8","\\gt":"\u27e9","/":"/","|":["|",{texClass:s.TEXCLASS.ORD}],".":"","\\\\":"\\","\\lmoustache":"\u23b0","\\rmoustache":"\u23b1","\\lgroup":"\u27ee","\\rgroup":"\u27ef","\\arrowvert":"\u23d0","\\Arrowvert":"\u2016","\\bracevert":"\u23aa","\\Vert":["\u2225",{texClass:s.TEXCLASS.ORD}],"\\|":["\u2225",{texClass:s.TEXCLASS.ORD}],"\\vert":["|",{texClass:s.TEXCLASS.ORD}],"\\uparrow":"\u2191","\\downarrow":"\u2193","\\updownarrow":"\u2195","\\Uparrow":"\u21d1","\\Downarrow":"\u21d3","\\Updownarrow":"\u21d5","\\backslash":"\\","\\rangle":"\u27e9","\\langle":"\u27e8","\\rbrace":"}","\\lbrace":"{","\\}":"}","\\{":"{","\\rceil":"\u2309","\\lceil":"\u2308","\\rfloor":"\u230b","\\lfloor":"\u230a","\\lbrack":"[","\\rbrack":"]"}),new n.CommandMap("macros",{displaystyle:["SetStyle","D",!0,0],textstyle:["SetStyle","T",!1,0],scriptstyle:["SetStyle","S",!1,1],scriptscriptstyle:["SetStyle","SS",!1,2],rm:["SetFont",i.TexConstant.Variant.NORMAL],mit:["SetFont",i.TexConstant.Variant.ITALIC],oldstyle:["SetFont",i.TexConstant.Variant.OLDSTYLE],cal:["SetFont",i.TexConstant.Variant.CALLIGRAPHIC],it:["SetFont","-tex-mathit"],bf:["SetFont",i.TexConstant.Variant.BOLD],bbFont:["SetFont",i.TexConstant.Variant.DOUBLESTRUCK],scr:["SetFont",i.TexConstant.Variant.SCRIPT],frak:["SetFont",i.TexConstant.Variant.FRAKTUR],sf:["SetFont",i.TexConstant.Variant.SANSSERIF],tt:["SetFont",i.TexConstant.Variant.MONOSPACE],tiny:["SetSize",.5],Tiny:["SetSize",.6],scriptsize:["SetSize",.7],small:["SetSize",.85],normalsize:["SetSize",1],large:["SetSize",1.2],Large:["SetSize",1.44],LARGE:["SetSize",1.73],huge:["SetSize",2.07],Huge:["SetSize",2.49],arcsin:["NamedFn"],arccos:["NamedFn"],arctan:["NamedFn"],arg:["NamedFn"],cos:["NamedFn"],cosh:["NamedFn"],cot:["NamedFn"],coth:["NamedFn"],csc:["NamedFn"],deg:["NamedFn"],det:"NamedOp",dim:["NamedFn"],exp:["NamedFn"],gcd:"NamedOp",hom:["NamedFn"],inf:"NamedOp",ker:["NamedFn"],lg:["NamedFn"],lim:"NamedOp",liminf:["NamedOp","lim inf"],limsup:["NamedOp","lim 
sup"],ln:["NamedFn"],log:["NamedFn"],max:"NamedOp",min:"NamedOp",Pr:"NamedOp",sec:["NamedFn"],sin:["NamedFn"],sinh:["NamedFn"],sup:"NamedOp",tan:["NamedFn"],tanh:["NamedFn"],limits:["Limits",1],nolimits:["Limits",0],overline:["UnderOver","00AF",null,1],underline:["UnderOver","005F"],overbrace:["UnderOver","23DE",1],underbrace:["UnderOver","23DF",1],overparen:["UnderOver","23DC"],underparen:["UnderOver","23DD"],overrightarrow:["UnderOver","2192"],underrightarrow:["UnderOver","2192"],overleftarrow:["UnderOver","2190"],underleftarrow:["UnderOver","2190"],overleftrightarrow:["UnderOver","2194"],underleftrightarrow:["UnderOver","2194"],overset:"Overset",underset:"Underset",stackrel:["Macro","\\mathrel{\\mathop{#2}\\limits^{#1}}",2],over:"Over",overwithdelims:"Over",atop:"Over",atopwithdelims:"Over",above:"Over",abovewithdelims:"Over",brace:["Over","{","}"],brack:["Over","[","]"],choose:["Over","(",")"],frac:"Frac",sqrt:"Sqrt",root:"Root",uproot:["MoveRoot","upRoot"],leftroot:["MoveRoot","leftRoot"],left:"LeftRight",right:"LeftRight",middle:"Middle",llap:"Lap",rlap:"Lap",raise:"RaiseLower",lower:"RaiseLower",moveleft:"MoveLeftRight",moveright:"MoveLeftRight",",":["Spacer",i.TexConstant.Length.THINMATHSPACE],":":["Spacer",i.TexConstant.Length.MEDIUMMATHSPACE],">":["Spacer",i.TexConstant.Length.MEDIUMMATHSPACE],";":["Spacer",i.TexConstant.Length.THICKMATHSPACE],"!":["Spacer",i.TexConstant.Length.NEGATIVETHINMATHSPACE],enspace:["Spacer",".5em"],quad:["Spacer","1em"],qquad:["Spacer","2em"],thinspace:["Spacer",i.TexConstant.Length.THINMATHSPACE],negthinspace:["Spacer",i.TexConstant.Length.NEGATIVETHINMATHSPACE],hskip:"Hskip",hspace:"Hskip",kern:"Hskip",mskip:"Hskip",mspace:"Hskip",mkern:"Hskip",rule:"rule",Rule:["Rule"],Space:["Rule","blank"],big:["MakeBig",s.TEXCLASS.ORD,.85],Big:["MakeBig",s.TEXCLASS.ORD,1.15],bigg:["MakeBig",s.TEXCLASS.ORD,1.45],Bigg:["MakeBig",s.TEXCLASS.ORD,1.75],bigl:["MakeBig",s.TEXCLASS.OPEN,.85],Bigl:["MakeBig",s.TEXCLASS.OPEN,1.15],biggl:["MakeBig",s.TEXCLASS.OPEN,1.45],Biggl:["MakeBig",s.TEXCLASS.OPEN,1.75],bigr:["MakeBig",s.TEXCLASS.CLOSE,.85],Bigr:["MakeBig",s.TEXCLASS.CLOSE,1.15],biggr:["MakeBig",s.TEXCLASS.CLOSE,1.45],Biggr:["MakeBig",s.TEXCLASS.CLOSE,1.75],bigm:["MakeBig",s.TEXCLASS.REL,.85],Bigm:["MakeBig",s.TEXCLASS.REL,1.15],biggm:["MakeBig",s.TEXCLASS.REL,1.45],Biggm:["MakeBig",s.TEXCLASS.REL,1.75],mathord:["TeXAtom",s.TEXCLASS.ORD],mathop:["TeXAtom",s.TEXCLASS.OP],mathopen:["TeXAtom",s.TEXCLASS.OPEN],mathclose:["TeXAtom",s.TEXCLASS.CLOSE],mathbin:["TeXAtom",s.TEXCLASS.BIN],mathrel:["TeXAtom",s.TEXCLASS.REL],mathpunct:["TeXAtom",s.TEXCLASS.PUNCT],mathinner:["TeXAtom",s.TEXCLASS.INNER],vcenter:["TeXAtom",s.TEXCLASS.VCENTER],buildrel:"BuildRel",hbox:["HBox",0],text:"HBox",mbox:["HBox",0],fbox:"FBox",strut:"Strut",mathstrut:["Macro","\\vphantom{(}"],phantom:"Phantom",vphantom:["Phantom",1,0],hphantom:["Phantom",0,1],smash:"Smash",acute:["Accent","00B4"],grave:["Accent","0060"],ddot:["Accent","00A8"],tilde:["Accent","007E"],bar:["Accent","00AF"],breve:["Accent","02D8"],check:["Accent","02C7"],hat:["Accent","005E"],vec:["Accent","2192"],dot:["Accent","02D9"],widetilde:["Accent","007E",1],widehat:["Accent","005E",1],matrix:"Matrix",array:"Matrix",pmatrix:["Matrix","(",")"],cases:["Matrix","{","","left left",null,".1em",null,!0],eqalign:["Matrix",null,null,"right 
left",i.TexConstant.Length.THICKMATHSPACE,".5em","D"],displaylines:["Matrix",null,null,"center",null,".5em","D"],cr:"Cr","\\":"CrLaTeX",newline:"Cr",hline:["HLine","solid"],hdashline:["HLine","dashed"],eqalignno:["Matrix",null,null,"right left",i.TexConstant.Length.THICKMATHSPACE,".5em","D",null,"right"],leqalignno:["Matrix",null,null,"right left",i.TexConstant.Length.THICKMATHSPACE,".5em","D",null,"left"],hfill:"HFill",hfil:"HFill",hfilll:"HFill",bmod:["Macro",'\\mmlToken{mo}[lspace="thickmathspace" rspace="thickmathspace"]{mod}'],pmod:["Macro","\\pod{\\mmlToken{mi}{mod}\\kern 6mu #1}",1],mod:["Macro","\\mathchoice{\\kern18mu}{\\kern12mu}{\\kern12mu}{\\kern12mu}\\mmlToken{mi}{mod}\\,\\,#1",1],pod:["Macro","\\mathchoice{\\kern18mu}{\\kern8mu}{\\kern8mu}{\\kern8mu}(#1)",1],iff:["Macro","\\;\\Longleftrightarrow\\;"],skew:["Macro","{{#2{#3\\mkern#1mu}\\mkern-#1mu}{}}",3],mathcal:["Macro","{\\cal #1}",1],mathscr:["Macro","{\\scr #1}",1],mathrm:["Macro","{\\rm #1}",1],mathbf:["Macro","{\\bf #1}",1],mathbb:["Macro","{\\bbFont #1}",1],Bbb:["Macro","{\\bbFont #1}",1],mathit:["Macro","{\\it #1}",1],mathfrak:["Macro","{\\frak #1}",1],mathsf:["Macro","{\\sf #1}",1],mathtt:["Macro","{\\tt #1}",1],textrm:["Macro","\\mathord{\\rm\\text{#1}}",1],textit:["Macro","\\mathord{\\it\\text{#1}}",1],textbf:["Macro","\\mathord{\\bf\\text{#1}}",1],textsf:["Macro","\\mathord{\\sf\\text{#1}}",1],texttt:["Macro","\\mathord{\\tt\\text{#1}}",1],pmb:["Macro","\\rlap{#1}\\kern1px{#1}",1],TeX:["Macro","T\\kern-.14em\\lower.5ex{E}\\kern-.115em X"],LaTeX:["Macro","L\\kern-.325em\\raise.21em{\\scriptstyle{A}}\\kern-.17em\\TeX"]," ":["Macro","\\text{ }"],not:"Not",dots:"Dots",space:"Tilde","\xa0":"Tilde",begin:"BeginEnd",end:"BeginEnd",label:"HandleLabel",ref:"HandleRef",nonumber:"HandleNoTag",mathchoice:"MathChoice",mmlToken:"MmlToken"},o.default);new n.EnvironmentMap("environment",a.default.environment,{array:["AlignedArray"],equation:["Equation",null,!0],"equation*":["Equation",null,!1],eqnarray:["EqnArray",null,!0,!0,"rcl","0 "+i.TexConstant.Length.THICKMATHSPACE,".5em"]},o.default);new n.CharacterMap("not_remap",null,{"\u2190":"\u219a","\u2192":"\u219b","\u2194":"\u21ae","\u21d0":"\u21cd","\u21d2":"\u21cf","\u21d4":"\u21ce","\u2208":"\u2209","\u220b":"\u220c","\u2223":"\u2224","\u2225":"\u2226","\u223c":"\u2241","~":"\u2241","\u2243":"\u2244","\u2245":"\u2247","\u2248":"\u2249","\u224d":"\u226d","=":"\u2260","\u2261":"\u2262","<":"\u226e",">":"\u226f","\u2264":"\u2270","\u2265":"\u2271","\u2272":"\u2274","\u2273":"\u2275","\u2276":"\u2278","\u2277":"\u2279","\u227a":"\u2280","\u227b":"\u2281","\u2282":"\u2284","\u2283":"\u2285","\u2286":"\u2288","\u2287":"\u2289","\u22a2":"\u22ac","\u22a8":"\u22ad","\u22a9":"\u22ae","\u22ab":"\u22af","\u227c":"\u22e0","\u227d":"\u22e1","\u2291":"\u22e2","\u2292":"\u22e3","\u22b2":"\u22ea","\u22b3":"\u22eb","\u22b4":"\u22ec","\u22b5":"\u22ed","\u2203":"\u2204"})},function(t,e,r){"use 
strict";Object.defineProperty(e,"__esModule",{value:!0}),r(12).add({Pcy:"\u041f",Poincareplane:"\u210c",Pr:"\u2abb",Prime:"\u2033",Proportion:"\u2237",par:"\u2225",para:"\xb6",parallel:"\u2225",parsim:"\u2af3",parsl:"\u2afd",part:"\u2202",pcy:"\u043f",percnt:"%",permil:"\u2030",perp:"\u22a5",pertenk:"\u2031",phmmat:"\u2133",phone:"\u260e",pitchfork:"\u22d4",planck:"\u210f",planckh:"\u210e",plankv:"\u210f",plus:"+",plusacir:"\u2a23",plusb:"\u229e",pluscir:"\u2a22",plusdo:"\u2214",plusdu:"\u2a25",pluse:"\u2a72",plusmn:"\xb1",plussim:"\u2a26",plustwo:"\u2a27",pm:"\xb1",pointint:"\u2a15",pound:"\xa3",pr:"\u227a",prE:"\u2ab3",prcue:"\u227c",pre:"\u2aaf",prec:"\u227a",precapprox:"\u2ab7",preccurlyeq:"\u227c",preceq:"\u2aaf",precsim:"\u227e",primes:"\u2119",prnE:"\u2ab5",prnap:"\u2ab9",prnsim:"\u22e8",prod:"\u220f",profalar:"\u232e",profline:"\u2312",profsurf:"\u2313",prop:"\u221d",propto:"\u221d",prsim:"\u227e",prurel:"\u22b0",puncsp:"\u2008"},"p")},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),r(12).add({RBarr:"\u2910",REG:"\xae",Racute:"\u0154",Rang:"\u27eb",Rarrtl:"\u2916",Rcaron:"\u0158",Rcedil:"\u0156",Rcy:"\u0420",ReverseElement:"\u220b",ReverseUpEquilibrium:"\u296f",Rho:"\u03a1",RightArrowBar:"\u21e5",RightDoubleBracket:"\u27e7",RightDownTeeVector:"\u295d",RightDownVectorBar:"\u2955",RightTeeVector:"\u295b",RightTriangleBar:"\u29d0",RightUpDownVector:"\u294f",RightUpTeeVector:"\u295c",RightUpVectorBar:"\u2954",RightVectorBar:"\u2953",RoundImplies:"\u2970",RuleDelayed:"\u29f4",rAarr:"\u21db",rArr:"\u21d2",rAtail:"\u291c",rBarr:"\u290f",rHar:"\u2964",race:"\u223d\u0331",racute:"\u0155",radic:"\u221a",raemptyv:"\u29b3",rang:"\u27e9",rangd:"\u2992",range:"\u29a5",rangle:"\u27e9",raquo:"\xbb",rarr:"\u2192",rarrap:"\u2975",rarrb:"\u21e5",rarrbfs:"\u2920",rarrc:"\u2933",rarrfs:"\u291e",rarrhk:"\u21aa",rarrlp:"\u21ac",rarrpl:"\u2945",rarrsim:"\u2974",rarrw:"\u219d",ratail:"\u291a",ratio:"\u2236",rationals:"\u211a",rbarr:"\u290d",rbbrk:"\u2773",rbrke:"\u298c",rbrksld:"\u298e",rbrkslu:"\u2990",rcaron:"\u0159",rcedil:"\u0157",rceil:"\u2309",rcub:"}",rcy:"\u0440",rdca:"\u2937",rdldhar:"\u2969",rdquo:"\u201d",rdquor:"\u201d",rdsh:"\u21b3",real:"\u211c",realine:"\u211b",realpart:"\u211c",reals:"\u211d",rect:"\u25ad",reg:"\xae",rfisht:"\u297d",rfloor:"\u230b",rhard:"\u21c1",rharu:"\u21c0",rharul:"\u296c",rightarrow:"\u2192",rightarrowtail:"\u21a3",rightharpoondown:"\u21c1",rightharpoonup:"\u21c0",rightleftarrows:"\u21c4",rightleftharpoons:"\u21cc",rightsquigarrow:"\u219d",risingdotseq:"\u2253",rlarr:"\u21c4",rlhar:"\u21cc",rlm:"\u200f",rmoustache:"\u23b1",rnmid:"\u2aee",roang:"\u27ed",roarr:"\u21fe",robrk:"\u27e7",ropar:"\u2986",roplus:"\u2a2e",rotimes:"\u2a35",rpar:")",rpargt:"\u2994",rppolint:"\u2a12",rrarr:"\u21c9",rsaquo:"\u203a",rsh:"\u21b1",rsqb:"]",rsquo:"\u2019",rsquor:"\u2019",rthree:"\u22cc",rtrie:"\u22b5",rtrif:"\u25b8",rtriltri:"\u29ce",ruluhar:"\u2968",rx:"\u211e"},"r")},function(t,e,r){"use strict";var n,i,o=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)});Object.defineProperty(e,"__esModule",{value:!0});var a=r(11),s=r(114),c=r(27);r(243);var l,u=(l=c.AbstractTags,o(h,l),h);function h(){return 
null!==l&&l.apply(this,arguments)||this}e.AmsTags=u;e.AmsConfiguration=a.Configuration.create("ams",{handler:{delimiter:["AMSsymbols-delimiter","AMSmath-delimiter"],macro:["AMSsymbols-mathchar0mi","AMSsymbols-mathchar0m0","AMSsymbols-delimiter","AMSsymbols-macros","AMSmath-mathchar0mo","AMSmath-macros","AMSmath-delimiter"],environment:["AMSmath-environment"]},items:(i={},i[s.MultlineItem.prototype.kind]=s.MultlineItem,i),tags:{ams:u},init:function(t){t.append(a.Configuration.extension())}})},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});function n(t){for(var e=[],r=0,n=t.length;r=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var o,a=r(11),s=r(9),T=r(22),w=r(116),b=r(18),c=r(3),l=(o=s.CommandMap,i(u,o),u.prototype.remove=function(t){this.map.delete(t)},u);function u(){return null!==o&&o.apply(this,arguments)||this}function A(t,e,r,n){var i,o,a,s;if(b.Package.packages.has(t.options.require.prefix+r)){var c=t.options.autoload[r],l=C(2===c.length&&Array.isArray(c[0])?c:[c,[]],2),u=l[0],h=l[1];try{for(var f=_(u),p=f.next();!p.done;p=f.next()){var d=p.value;k.remove(d)}}catch(t){i={error:t}}finally{try{p&&!p.done&&(o=f.return)&&o.call(f)}finally{if(i)throw i.error}}try{for(var m=_(h),y=m.next();!y.done;y=m.next()){var v=y.value;I.remove(v)}}catch(t){a={error:t}}finally{try{y&&!y.done&&(s=m.return)&&s.call(m)}finally{if(a)throw a.error}}t.i-=e.length+(n?0:7)}w.RequireLoad(t,r)}var k=new(e.AutoloadCommandMap=l)("autoload-macros",{},{}),I=new l("autoload-environments",{},{});e.AutoloadConfiguration=a.Configuration.create("autoload",{handler:{macro:["autoload-macros"],environment:["autoload-environments"]},options:{autoload:c.expandable({action:["toggle","mathtip","texttip"],amsCd:[[],["CD"]],bbox:["bbox"],boldsymbol:["boldsymbol"],braket:["bra","ket","braket","set","Bra","Ket","Braket","Set","ketbra","Ketbra"],cancel:["cancel","bcancel","xcancel","cancelto"],color:["color","definecolor","textcolor","colorbox","fcolorbox"],enclose:["enclose"],extpfeil:["xtwoheadrightarrow","xtwoheadleftarrow","xmapsto","xlongequal","xtofrom","Newextarrow"],html:["href","class","style","cssId"],mhchem:["ce","pu"],newcommand:["newcommand","renewcommand","newenvironment","renewenvironment","def","let"],unicode:["unicode"],verb:["verb"]})},config:function(t,e){var r,n,i,o,a,s,c=e.parseOptions,l=c.handlers.get("macro"),u=c.handlers.get("environment"),h=c.options.autoload;try{for(var f=_(Object.keys(h)),p=f.next();!p.done;p=f.next()){var d=p.value,m=h[d],y=C(2===m.length&&Array.isArray(m[0])?m:[m,[]],2),v=y[0],b=y[1];try{for(var g=(i=void 0,_(v)),M=g.next();!M.done;M=g.next()){var O=M.value;l.lookup(O)&&"color"!==O||k.add(O,new T.Macro(O,A,[d,!0]))}}catch(t){i={error:t}}finally{try{M&&!M.done&&(o=g.return)&&o.call(g)}finally{if(i)throw i.error}}try{for(var x=(a=void 0,_(b)),S=x.next();!S.done;S=x.next()){var E=S.value;u.lookup(E)||I.add(E,new T.Macro(E,A,[d,!1]))}}catch(t){a={error:t}}finally{try{S&&!S.done&&(s=x.return)&&s.call(x)}finally{if(a)throw a.error}}}}catch(t){r={error:t}}finally{try{p&&!p.done&&(n=f.return)&&n.call(f)}finally{if(r)throw r.error}}c.options.require.jax||w.RequireConfiguration.config(t,e)},configPriority:10,init:function(t){t.options.require||c.defaultOptions(t.options,w.RequireConfiguration.options)},priority:10})},function(t,e,r){"use strict";var u=this&&this.__values||function(t){var e="function"==typeof 
Symbol&&Symbol.iterator,r=e&&t[e],n=0;if(r)return r.call(t);if(t&&"number"==typeof t.length)return{next:function(){return t&&n>=t.length&&(t=void 0),{value:t&&t[n++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")};Object.defineProperty(e,"__esModule",{value:!0});var n=r(11),i=r(3),o=r(9),h=r(22),f=r(72);var p=new o.CommandMap("configMacros",{},{});e.ConfigMacrosConfiguration=n.Configuration.create("configMacros",{handler:{macro:["configMacros"]},config:function(t,e){var r,n,i=t.options.macros;try{for(var o=u(Object.keys(i)),a=o.next();!a.done;a=o.next()){var s=a.value,c="string"==typeof i[s]?[i[s]]:i[s],l=Array.isArray(c[2])?new h.Macro(s,f.default.MacroWithTemplate,c.slice(0,2).concat(c[2])):new h.Macro(s,f.default.Macro,c);p.add(s,l)}}catch(t){r={error:t}}finally{try{a&&!a.done&&(n=o.return)&&n.call(o)}finally{if(r)throw r.error}}},options:{macros:i.expandable({})}})},function(t,e,r){"use strict";var n;Object.defineProperty(e,"__esModule",{value:!0});var i=r(11),o=r(118),a=r(8);r(247);e.NewcommandConfiguration=i.Configuration.create("newcommand",{handler:{macro:["Newcommand-macros"]},items:(n={},n[o.BeginEnvItem.prototype.kind]=o.BeginEnvItem,n),options:{maxMacros:1e3},init:function(t){t.handler.macro.indexOf(a.ExtensionMaps.NEW_COMMAND)<0&&t.append(i.Configuration.extension())}})},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(72);new(r(9).CommandMap)("Newcommand-macros",{newcommand:"NewCommand",renewcommand:"NewCommand",newenvironment:"NewEnvironment",renewenvironment:"NewEnvironment",def:"MacroDef",let:"Let"},n.default)},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=r(11);e.NoUndefinedConfiguration=n.Configuration.create("noundefined",{fallback:{macro:function(t,e){var r=t.create("text","\\"+e);t.Push(t.create("node","mtext",[],{mathcolor:"red"},r))}}})},function(t,e,r){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.registerTeX=function(t){if(MathJax.startup){MathJax.startup.registerConstructor("tex",n.TeX),MathJax.startup.useInput("tex"),MathJax.config.tex||(MathJax.config.tex={});var e=MathJax.config.tex.packages;MathJax.config.tex.packages=t,e&&(0,i.insert)(MathJax.config.tex,{packages:e})}};var n=r(105),i=r(3)},function(t,e,r){"use strict";r(251);var n=r(119);MathJax.startup&&(MathJax.startup.registerConstructor("mml",n.MathML),MathJax.startup.useInput("mml"))},function(t,e,r){"use strict";var n=r(5),i=s(r(119)),o=s(r(120)),a=s(r(121));function s(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var r in t)Object.prototype.hasOwnProperty.call(t,r)&&(e[r]=t[r]);return e.default=t,e}(0,n.combineWithMathJax)({_:{input:{mathml_ts:i,mathml:{FindMathML:o,MathMLCompile:a}}}})},function(t,e,r){"use strict";r(253);var n=r(5),i=r(122);MathJax.loader&&(0,n.combineDefaults)(MathJax.config.loader,"output/chtml",{checkReady:function(){return MathJax.loader.load("output/chtml/fonts/tex")}}),MathJax.startup&&(MathJax.startup.registerConstructor("chtml",i.CHTML),MathJax.startup.useOutput("chtml"))},function(t,e,r){"use strict";var 
n=r(5),i=mt(r(122)),o=mt(r(1)),a=mt(r(148)),s=mt(r(2)),c=mt(r(125)),l=mt(r(127)),u=mt(r(172)),h=mt(r(174)),f=mt(r(167)),p=mt(r(130)),d=mt(r(146)),m=mt(r(150)),y=mt(r(152)),v=mt(r(168)),b=mt(r(132)),g=mt(r(160)),M=mt(r(136)),O=mt(r(134)),x=mt(r(144)),S=mt(r(155)),E=mt(r(149)),C=mt(r(138)),_=mt(r(142)),T=mt(r(74)),w=mt(r(37)),A=mt(r(162)),k=mt(r(165)),I=mt(r(140)),L=mt(r(164)),N=mt(r(159)),P=mt(r(157)),B=mt(r(170)),R=mt(r(16)),j=mt(r(124)),H=mt(r(23)),D=mt(r(36)),X=mt(r(123)),F=mt(r(128)),W=mt(r(126)),J=mt(r(173)),q=mt(r(175)),V=mt(r(76)),U=mt(r(131)),z=mt(r(147)),G=mt(r(151)),K=mt(r(153)),Z=mt(r(169)),Y=mt(r(133)),$=mt(r(161)),Q=mt(r(137)),tt=mt(r(135)),et=mt(r(145)),rt=mt(r(156)),nt=mt(r(73)),it=mt(r(139)),ot=mt(r(143)),at=mt(r(154)),st=mt(r(38)),ct=mt(r(163)),lt=mt(r(166)),ut=mt(r(141)),ht=mt(r(75)),ft=mt(r(39)),pt=mt(r(158)),dt=mt(r(171));function mt(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var r in t)Object.prototype.hasOwnProperty.call(t,r)&&(e[r]=t[r]);return e.default=t,e}(0,n.combineWithMathJax)({_:{output:{chtml_ts:i,chtml:{FontData:o,Notation:a,Wrapper:s,WrapperFactory:c,Wrappers_ts:l,Wrappers:{TeXAtom:u,TextNode:h,maction:f,math:p,menclose:d,mfenced:m,mfrac:y,mglyph:v,mi:b,mmultiscripts:g,mn:M,mo:O,mpadded:x,mroot:S,mrow:E,ms:C,mspace:_,msqrt:T,msubsup:w,mtable:A,mtd:k,mtext:I,mtr:L,munderover:N,scriptbase:P,semantics:B}},common:{BBox:R,CssStyles:j,FontData:H,Notation:D,OutputJax:X,Wrapper:F,WrapperFactory:W,Wrappers:{TeXAtom:J,TextNode:q,maction:V,math:U,menclose:z,mfenced:G,mfrac:K,mglyph:Z,mi:Y,mmultiscripts:$,mn:Q,mo:tt,mpadded:et,mroot:rt,mrow:nt,ms:it,mspace:ot,msqrt:at,msubsup:st,mtable:ct,mtd:lt,mtext:ut,mtr:ht,munderover:ft,scriptbase:pt,semantics:dt}}}}})},function(t,e,r){"use strict";var n,i=this&&this.__extends||(n=function(t,e){return(n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},function(t,e){function r(){this.constructor=t}n(t,e),t.prototype=null===e?Object.create(e):(r.prototype=e.prototype,new r)}),o=this&&this.__assign||function(){return(o=Object.assign||function(t){for(var e,r=1,n=arguments.length;rdocument.body.offsetWidth-5&&(n=document.body.offsetWidth-l.offsetWidth-5),this.post(n,i)},kt.prototype.registerWidget=function(t){this.widgets.push(t)},kt.prototype.unregisterWidget=function(t){var e=this.widgets.indexOf(t);-1document.body.offsetWidth-5&&(i=Math.max(5,i-n-e.offsetWidth+6)),I.prototype.post.call(this,i,o)}},It.prototype.display=function(){this.baseMenu.getFrame().appendChild(this.getHtml())},It.prototype.setBaseMenu=function(){for(var t=this;(t=t.anchor.getMenu())instanceof It;);this.baseMenu=t},L=It,k.SubMenu=L,function(t){t.close=function(t){var e=t.getMenu();e instanceof N.SubMenu?e.baseMenu.unpost():e.unpost()},t.getActiveElement=function(t){var e=t.getMenu();return(e instanceof N.SubMenu?e.baseMenu:e).getStore().getActive()},t.error=function(t,e){console.log("ContextMenu Error: "+e)},t.counter=function(){return e++};var e=0}((N=vt=vt||{}).MenuUtil||(N.MenuUtil={})),P=vt=vt||{},B=P.AbstractEntry,bt(Lt,B),Object.defineProperty(Lt.prototype,"content",{get:function(){return this._content},set:function(t){this._content=t,this.generateHtml(),this.getMenu()&&this.getMenu().generateHtml()},enumerable:!0,configurable:!0}),Lt.prototype.getId=function(){return 
this.id},Lt.prototype.press=function(){this.disabled||(this.executeAction(),this.executeCallbacks_())},Lt.prototype.executeAction=function(){},Lt.prototype.registerCallback=function(t){-1===this.callbacks.indexOf(t)&&this.callbacks.push(t)},Lt.prototype.unregisterCallback=function(t){var e=this.callbacks.indexOf(t);-1!==e&&this.callbacks.splice(e,1)},Lt.prototype.mousedown=function(t){this.press(),this.stop(t)},Lt.prototype.mouseover=function(t){this.focus(),this.stop(t)},Lt.prototype.mouseout=function(t){this.deactivate(),this.stop(t)},Lt.prototype.generateHtml=function(){B.prototype.generateHtml.call(this);var t=this.getHtml();t.setAttribute("aria-disabled","false"),t.textContent=this.content},Lt.prototype.activate=function(){this.disabled||this.getHtml().classList.add(P.HtmlClasses.MENUACTIVE)},Lt.prototype.deactivate=function(){this.getHtml().classList.remove(P.HtmlClasses.MENUACTIVE)},Lt.prototype.focus=function(){this.getMenu().setFocused(this),B.prototype.focus.call(this),this.activate()},Lt.prototype.unfocus=function(){this.deactivate(),B.prototype.unfocus.call(this)},Lt.prototype.escape=function(t){P.MenuUtil.close(this)},Lt.prototype.up=function(t){this.getMenu().up(t)},Lt.prototype.down=function(t){this.getMenu().down(t)},Lt.prototype.left=function(t){if(this.getMenu()instanceof P.ContextMenu)this.getMenu().left(t);else{var e=this.getMenu();e.setFocused(null),e.getAnchor().focus()}},Lt.prototype.right=function(t){this.getMenu().right(t)},Lt.prototype.space=function(t){this.press()},Lt.prototype.disable=function(){this.disabled=!0;var t=this.getHtml();t.classList.add(P.HtmlClasses.MENUDISABLED),t.setAttribute("aria-disabled","true")},Lt.prototype.enable=function(){this.disabled=!1;var t=this.getHtml();t.classList.remove(P.HtmlClasses.MENUDISABLED),t.removeAttribute("aria-disabled")},Lt.prototype.executeCallbacks_=function(){P.MenuUtil.getActiveElement(this);for(var t=0,e=this.callbacks;t'+this.title+''),r.write("
"+this.generateContent()+"
"),r.write('
'),r.write(""),r.close()):(r.open(),r.write(""+this.title+''),r.write("
"+this.generateContent()+"
"),r.write(""),r.close(),setTimeout(this.resize.bind(this),50))},Jt.prototype.unpost=function(){this.windowList.forEach(function(t){return t.close()}),this.window=null},Jt.prototype.generateContent=function(){return this.content(this.active)},Jt.prototype.resize=function(){var t=this.window.document.body.firstChild,e=this.window.outerHeight-this.window.innerHeight||30,r=this.window.outerWidth-this.window.innerWidth||30;r=Math.max(140,Math.min(Math.floor(.5*this.window.screen.width),t.offsetWidth+r+25)),e=Math.max(40,Math.min(Math.floor(.5*this.window.screen.height),t.offsetHeight+e+25)),this.window.resizeTo(r,e);var n=this.active.getBoundingClientRect();if(n){var i=Math.max(0,Math.min(n.right-Math.floor(r/2),this.window.screen.width-r-20)),o=Math.max(0,Math.min(n.bottom-Math.floor(e/2),this.window.screen.height-e-20));this.window.moveTo(i,o)}this.active=null},Jt.popupSettings={status:"no",toolbar:"no",locationbar:"no",menubar:"no",directories:"no",personalbar:"no",resizable:"yes",scrollbars:"yes",width:400,height:300},yt=Jt,dt.Popup=yt,(vt=vt||{}).TOUCH={START:"touchstart",MOVE:"touchmove",END:"touchend",CANCEL:"touchcancel"}},function(t,e,r){"use strict";var n=r(5),i=l(r(225)),o=l(r(226)),a=l(r(229)),s=l(r(227)),c=l(r(228));function l(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var r in t)Object.prototype.hasOwnProperty.call(t,r)&&(e[r]=t[r]);return e.default=t,e}(0,n.combineWithMathJax)({_:{ui:{menu:{MJContextMenu:i,Menu:o,MenuHandler:a,MmlVisitor:s,SelectableInfo:c}}}})},function(t,e,r){"use strict";r(78);var n=r(17),i=r(5),o=r(262);(0,i.combineDefaults)(MathJax.config.loader,"dependencies",o.dependencies),(0,i.combineDefaults)(MathJax.config.loader,"paths",o.paths),(0,i.combineDefaults)(MathJax.config.loader,"provides",o.provides),n.Loader.preLoad("loader"),n.Loader.load.apply(n.Loader,function(t){if(Array.isArray(t)){for(var e=0,r=Array(t.length);eli { - margin-left: 22px; -} - -ol>li { - margin-left: 27.2px; -} - -li>*:first-child { - margin-top: 0 -} - -/* Text alignements, this should be forbidden. */ - -.left { - text-align: left; -} - -.right { - text-align: right; -} - -.center { - text-align: center; -} - -/* Links and anchors */ - -a { - text-decoration: none; - color: var(--link-color); -} - -a:hover { - box-shadow: 0 1px 0 0 var(--link-color); -} - -/* Linked highlight */ -*:target { - background-color: var(--target-background) !important; - box-shadow: 0 0px 0 1px var(--target-shadow) !important; - border-radius: 1px; -} - -*:hover > a.anchor { - visibility: visible; -} - -a.anchor:before { - content: "#"; -} - -a.anchor:hover { - box-shadow: none; - text-decoration: none; - color: var(--anchor-hover); -} - -a.anchor { - visibility: hidden; - position: absolute; - /* top: 0px; */ - /* margin-left: -3ex; */ - margin-left: -1.3em; - font-weight: normal; - font-style: normal; - padding-right: 0.4em; - padding-left: 0.4em; - /* To remain selectable */ - color: var(--anchor-color); -} - -.spec > a.anchor { - margin-left: -2.3em; - padding-right: 0.9em; -} - -.xref-unresolved { - color: #2C94BD; -} -.xref-unresolved:hover { - box-shadow: 0 1px 0 0 var(--xref-shadow); -} - -/* Section and document divisions. 
- Until at least 4.03 many of the modules of the stdlib start at .h7, - we restart the sequence there like h2 */ - -h1, h2, h3, h4, h5, h6, .h7, .h8, .h9, .h10 { - font-family: "Fira Sans", Helvetica, Arial, sans-serif; - font-weight: 400; - padding-top: 0.1em; - line-height: 1.2; - overflow-wrap: break-word; -} - -h1 { - font-weight: 500; - font-size: 2.441em; -} - -h1 { - font-weight: 500; - font-size: 1.953em; - box-shadow: 0 1px 0 0 var(--header-shadow); -} - -h2 { - font-size: 1.563em; -} - -h3 { - font-size: 1.25em; -} - -small, .font_small { - font-size: 0.8em; -} - -h1 code, h1 tt { - font-size: inherit; - font-weight: inherit; -} - -h2 code, h2 tt { - font-size: inherit; - font-weight: inherit; -} - -h3 code, h3 tt { - font-size: inherit; - font-weight: inherit; -} - -h3 code, h3 tt { - font-size: inherit; - font-weight: inherit; -} - -h4 { - font-size: 1.12em; -} - -/* Comment delimiters, hidden but accessible to screen readers and - selected for copy/pasting */ - -/* Taken from bootstrap */ -/* See also https://stackoverflow.com/a/27769435/4220738 */ -.comment-delim { - position: absolute; - width: 1px; - height: 1px; - padding: 0; - margin: -1px; - overflow: hidden; - clip: rect(0, 0, 0, 0); - white-space: nowrap; - border: 0; -} - -/* Preformatted and code */ - -tt, code, pre { - font-family: "Fira Mono", courier; - font-weight: 400; -} - -pre { - padding: 0.1em; - border: 1px solid var(--pre-border-color); - border-radius: 5px; - overflow-x: auto; -} - -p code, -li code { - background-color: var(--li-code-background); - color: var(--li-code-color); - border-radius: 3px; - padding: 0 0.3ex; -} - -p a > code { - color: var(--link-color); -} - -code { - white-space: pre-wrap; -} - -/* Code blocks (e.g. Examples) */ - -pre code { - font-size: 0.893rem; -} - -/* Code lexemes */ - -.keyword { - font-weight: 500; -} - -.arrow { white-space: nowrap } - -/* Module member specification */ - -.spec { - background-color: var(--spec-summary-background); - border-radius: 3px; - border-left: 4px solid var(--spec-summary-border-color); - border-right: 5px solid transparent; - padding: 0.35em 0.5em; -} - -li:not(:last-child) > .def-doc { - margin-bottom: 15px; -} - -/* Spacing between items */ -div.odoc-spec,.odoc-include { - margin-bottom: 2em; -} - -.spec.type .variant p, .spec.type .record p { - margin: 5px; -} - -.spec.type .variant, .spec.type .record { - margin-left: 2ch; - list-style: none; - display: flex; - flex-wrap: wrap; - row-gap: 4px; -} - -.spec.type .record > code, .spec.type .variant > code { - min-width: 40%; -} - -.spec.type > ol { - margin-top: 0; - margin-bottom: 0; -} - -.spec.type .record > .def-doc, .spec.type .variant > .def-doc { - min-width:50%; - padding: 0.25em 0.5em; - margin-left: 10%; - border-radius: 3px; - flex-grow:1; - background: var(--main-background); - box-shadow: 2px 2px 4px lightgrey; -} - -div.def { - margin-top: 0; - text-indent: -2ex; - padding-left: 2ex; -} - -div.def-doc>*:first-child { - margin-top: 0; -} - -/* Collapsible inlined include and module */ - -.odoc-include details { - position: relative; -} - -.odoc-include.shadowed-include { - display: none; -} - -.odoc-include details:after { - z-index: -100; - display: block; - content: " "; - position: absolute; - border-radius: 0 1ex 1ex 0; - right: -20px; - top: 1px; - bottom: 1px; - width: 15px; - background: var(--spec-details-after-background, rgba(0, 4, 15, 0.05)); - box-shadow: 0 0px 0 1px var(--spec-details-after-shadow, rgba(204, 204, 204, 0.53)); -} - -.odoc-include summary { - 
position: relative; - margin-bottom: 1em; - cursor: pointer; - outline: none; -} - -.odoc-include summary:hover { - background-color: var(--spec-summary-hover-background); -} - -/* FIXME: Does not work in Firefox. */ -.odoc-include summary::-webkit-details-marker { - color: #888; - transform: scaleX(-1); - position: absolute; - top: calc(50% - 5px); - height: 11px; - right: -29px; -} - -/* Records and variants FIXME */ - -div.def table { - text-indent: 0em; - padding: 0; - margin-left: -2ex; -} - -td.def { - padding-left: 2ex; -} - -td.def-doc *:first-child { - margin-top: 0em; -} - -/* Lists of @tags */ - -.at-tags { list-style-type: none; margin-left: -3ex; } -.at-tags li { padding-left: 3ex; text-indent: -3ex; } -.at-tags .at-tag { text-transform: capitalize } - -/* Lists of modules */ - -.modules { list-style-type: none; margin-left: -3ex; } -.modules li { padding-left: 3ex; text-indent: -3ex; margin-top: 5px } -.modules .synopsis { padding-left: 1ch; } - -/* Odig package index */ - -.packages { list-style-type: none; margin-left: -3ex; } -.packages li { padding-left: 3ex; text-indent: -3ex } -.packages li a.anchor { padding-right: 0.5ch; padding-left: 3ch; } -.packages .version { font-size: 10px; color: var(--by-name-version-color); } -.packages .synopsis { padding-left: 1ch } - -.by-name nav a { - text-transform: uppercase; - font-size: 18px; - margin-right: 1ex; - color: var(--by-name-nav-link-color,); - display: inline-block; -} - -.by-tag nav a { - margin-right: 1ex; - color: var(--by-name-nav-link-color); - display: inline-block; -} - -.by-tag ol { list-style-type: none; } -.by-tag ol.tags li { margin-left: 1ch; display: inline-block } -.by-tag td:first-child { text-transform: uppercase; } - -/* Odig package page */ - -.package nav { - display: inline; - font-size: 14px; - font-weight: normal; -} - -.package .version { - font-size: 14px; -} - -.package.info { - margin: 0; -} - -.package.info td:first-child { - font-style: italic; - padding-right: 2ex; -} - -.package.info ul { - list-style-type: none; - display: inline; - margin: 0; -} - -.package.info li { - display: inline-block; - margin: 0; - margin-right: 1ex; -} - -#info-authors li, #info-maintainers li { - display: block; -} - -/* Sidebar and TOC */ - -.odoc-toc:before { - display: block; - content: "Contents"; - text-transform: uppercase; - font-size: 1em; - margin: 1.414em 0 0.5em; - font-weight: 500; - color: var(--toc-before-color); - line-height: 1.2; -} - -.odoc-toc { - position: fixed; - top: 0px; - bottom: 0px; - left: 0px; - max-width: 30ex; - min-width: 26ex; - width: 20%; - background: var(--toc-background); - overflow: auto; - color: var(--toc-color); - padding-left: 2ex; - padding-right: 2ex; -} - -.odoc-toc ul li a { - font-family: "Fira Sans", sans-serif; - font-size: 0.95em; - color: var(--color); - font-weight: 400; - line-height: 1.6em; - display: block; -} - -.odoc-toc ul li a:hover { - box-shadow: none; - text-decoration: underline; -} - -/* First level titles */ - -.odoc-toc>ul>li>a { - font-weight: 500; -} - -.odoc-toc li ul { - margin: 0px; -} - -.odoc-toc ul { - list-style-type: none; -} - -.odoc-toc ul li { - margin: 0; -} -.odoc-toc>ul>li { - margin-bottom: 0.3em; -} - -.odoc-toc ul li li { - border-left: 1px solid var(--toc-list-border); - margin-left: 5px; - padding-left: 12px; -} - -/* Mobile adjustements. 
*/ - -@media only screen and (max-width: 95ex) { - body.odoc { - margin: 2em; - } - .odoc-toc { - position: static; - width: auto; - min-width: unset; - max-width: unset; - border: none; - padding: 0.2em 1em; - border-radius: 5px; - margin-bottom: 2em; - } -} - -/* Print adjustements. */ - -@media print { - body { - color: black; - background: white; - } - body nav:first-child { - visibility: hidden; - } -} - -/* Syntax highlighting (based on github-gist) */ - -.hljs { - display: block; - background: var(--code-background); - padding: 0.5em; - color: var(--color); - overflow-x: auto; -} - -.hljs-comment, -.hljs-meta { - color: #969896; -} - -.hljs-string, -.hljs-variable, -.hljs-template-variable, -.hljs-strong, -.hljs-emphasis, -.hljs-quote { - color: #df5000; -} - -.hljs-keyword, -.hljs-selector-tag { - color: #a71d5d; -} - -.hljs-type, -.hljs-class .hljs-title { - color: #458; - font-weight: 500; -} - -.hljs-literal, -.hljs-symbol, -.hljs-bullet, -.hljs-attribute { - color: #0086b3; -} - -.hljs-section, -.hljs-name { - color: #63a35c; -} - -.hljs-tag { - color: #333333; -} - -.hljs-attr, -.hljs-selector-id, -.hljs-selector-class, -.hljs-selector-attr, -.hljs-selector-pseudo { - color: #795da3; -} - -.hljs-addition { - color: #55a532; - background-color: #eaffea; -} - -.hljs-deletion { - color: #bd2c00; - background-color: #ffecec; -} - -.hljs-link { - text-decoration: underline; -} - -/*--------------------------------------------------------------------------- - Copyright (c) 2016 The odoc contributors - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ---------------------------------------------------------------------------*/ diff --git a/owl-base/Owl_algodiff_check/.dummy b/owl-base/Owl_algodiff_check/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_algodiff_check/Make/Forward/index.html b/owl-base/Owl_algodiff_check/Make/Forward/index.html deleted file mode 100644 index 4a3ffd1c2..000000000 --- a/owl-base/Owl_algodiff_check/Make/Forward/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Forward (owl-base.Owl_algodiff_check.Make.Forward)

Module Make.Forward

val check : - threshold:float -> - f:(AD.t -> AD.t) -> - directions:AD.t array -> - AD.t array -> - bool * int
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_check/Make/Reverse/index.html b/owl-base/Owl_algodiff_check/Make/Reverse/index.html deleted file mode 100644 index a18757390..000000000 --- a/owl-base/Owl_algodiff_check/Make/Reverse/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -Reverse (owl-base.Owl_algodiff_check.Make.Reverse)

Module Make.Reverse

val check : - threshold:float -> - order:[ `second | `fourth | `eighth ] -> - ?verbose:bool -> - ?eps:float -> - f:(AD.t -> AD.t) -> - directions:AD.t array -> - AD.t array -> - bool * int
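An illustrative sketch of how the Forward.check and Reverse.check signatures documented above are typically driven. Only the check signatures come from these deleted pages; the choice of AD module (for instance Owl.Algodiff.D from the owl package) and the functor instantiation are assumptions made for the example.

  (* assumption: AD is any module matching the documented functor argument *)
  module AD = Owl.Algodiff.D
  module Check = Owl_algodiff_check.Make (AD)

  let () =
    (* a scalar-valued test function built from the AD maths operators *)
    let f x = AD.Maths.(sum' (sqr x + sin x)) in
    let samples = Array.init 8 (fun _ -> AD.Mat.gaussian 3 3) in
    let directions = Array.init 8 (fun _ -> AD.Mat.gaussian 3 3) in
    let ok, n =
      Check.Reverse.check ~threshold:1e-4 ~order:`fourth ~f ~directions samples
    in
    (* Forward.check takes the same ~threshold, ~f, ~directions and samples *)
    Printf.printf "reverse-mode check: %b (%d)\n" ok n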
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_check/Make/argument-1-AD/A/Linalg/index.html b/owl-base/Owl_algodiff_check/Make/argument-1-AD/A/Linalg/index.html deleted file mode 100644 index c2428b0f9..000000000 --- a/owl-base/Owl_algodiff_check/Make/argument-1-AD/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_algodiff_check.Make.AD.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : - ?solver:[ `default | `bilinear | `direct ] -> - arr -> - arr -> - arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_check/Make/argument-1-AD/A/Mat/index.html b/owl-base/Owl_algodiff_check/Make/argument-1-AD/A/Mat/index.html deleted file mode 100644 index 508343f61..000000000 --- a/owl-base/Owl_algodiff_check/Make/argument-1-AD/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_algodiff_check.Make.AD.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_check/Make/argument-1-AD/A/Scalar/index.html b/owl-base/Owl_algodiff_check/Make/argument-1-AD/A/Scalar/index.html deleted file mode 100644 index eedbddc1b..000000000 --- a/owl-base/Owl_algodiff_check/Make/argument-1-AD/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_algodiff_check.Make.AD.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_check/Make/argument-1-AD/A/index.html b/owl-base/Owl_algodiff_check/Make/argument-1-AD/A/index.html deleted file mode 100644 index 2155681f4..000000000 --- a/owl-base/Owl_algodiff_check/Make/argument-1-AD/A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl-base.Owl_algodiff_check.Make.AD.A)

Module AD.A

include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : - ?max_row:int -> - ?max_col:int -> - ?header:bool -> - ?fmt:(elt -> string) -> - arr -> - unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val dilated_conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val dilated_conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val dilated_conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val transpose_conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val transpose_conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val transpose_conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val max_pool1d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val max_pool2d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val max_pool3d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool1d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool2d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool3d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv1d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv2d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv2d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv3d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv3d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val max_pool2d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val max_pool3d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool1d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool2d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool3d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
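A minimal usage sketch of the ndarray signature listed above. A is assumed to be bound to some implementation of this signature; every call below uses only values documented on this page.

  let () =
    (* 2x3 array with elements 0,1,...,5 *)
    let x = A.sequential ~a:(A.float_to_elt 0.) [| 2; 3 |] in
    (* elementwise square via the documented map and Scalar.sqr *)
    let y = A.map A.Scalar.sqr x in
    Printf.printf "sum of squares: %g\n" (A.elt_to_float (A.sum' y));
    A.print ~max_row:2 ~max_col:3 y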
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_check/Make/argument-1-AD/Arr/index.html b/owl-base/Owl_algodiff_check/Make/argument-1-AD/Arr/index.html deleted file mode 100644 index 9401b46bf..000000000 --- a/owl-base/Owl_algodiff_check/Make/argument-1-AD/Arr/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Arr (owl-base.Owl_algodiff_check.Make.AD.Arr)

Module AD.Arr

val empty : int array -> t
val zeros : int array -> t
val ones : int array -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int array -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int array -> t
val shape : t -> int array
val numel : t -> int
val reset : t -> unit
val reshape : t -> int array -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_check/Make/argument-1-AD/Builder/index.html b/owl-base/Owl_algodiff_check/Make/argument-1-AD/Builder/index.html deleted file mode 100644 index 10a588b48..000000000 --- a/owl-base/Owl_algodiff_check/Make/argument-1-AD/Builder/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Builder (owl-base.Owl_algodiff_check.Make.AD.Builder)

Module AD.Builder

Ops Builder
module type Siso = sig ... end
val build_siso : (module Siso) -> t -> t

build single input single output operations

module type Sipo = sig ... end
val build_sipo : (module Sipo) -> t -> t * t

build single input pair outputs operations

module type Sito = sig ... end
val build_sito : (module Sito) -> t -> t * t * t

build single input triple outputs operations

module type Siao = sig ... end
val build_siao : (module Siao) -> t -> t array

build single input array output operations

module type Piso = sig ... end
val build_piso : (module Piso) -> t -> t -> t

build pair inputs single output operations

module type Aiso = sig ... end
val build_aiso : (module Aiso) -> t array -> t

build array input single output operations

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_check/Make/argument-1-AD/Builder/module-type-Aiso/index.html b/owl-base/Owl_algodiff_check/Make/argument-1-AD/Builder/module-type-Aiso/index.html deleted file mode 100644 index be76e3c7a..000000000 --- a/owl-base/Owl_algodiff_check/Make/argument-1-AD/Builder/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl-base.Owl_algodiff_check.Make.AD.Builder.Aiso)

Module type Builder.Aiso

val label : string
val ff : t array -> t
val df : int list -> t -> t array -> t array -> t
val dr : int list -> t array -> t -> t Stdlib.ref -> t list
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_check/Make/argument-1-AD/Builder/module-type-Piso/index.html b/owl-base/Owl_algodiff_check/Make/argument-1-AD/Builder/module-type-Piso/index.html deleted file mode 100644 index e41963cd5..000000000 --- a/owl-base/Owl_algodiff_check/Make/argument-1-AD/Builder/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl-base.Owl_algodiff_check.Make.AD.Builder.Piso)

Module type Builder.Piso

val label : string
val ff_aa : A.elt -> A.elt -> t
val ff_ab : A.elt -> A.arr -> t
val ff_ba : A.arr -> A.elt -> t
val ff_bb : A.arr -> A.arr -> t
val df_da : t -> t -> t -> t -> t
val df_db : t -> t -> t -> t -> t
val df_dab : t -> t -> t -> t -> t -> t
val dr_ab : t -> t -> t -> t Stdlib.ref -> t * t
val dr_a : t -> t -> t -> t Stdlib.ref -> t
val dr_b : t -> t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_check/Make/argument-1-AD/Builder/module-type-Siao/index.html b/owl-base/Owl_algodiff_check/Make/argument-1-AD/Builder/module-type-Siao/index.html deleted file mode 100644 index 153a448d4..000000000 --- a/owl-base/Owl_algodiff_check/Make/argument-1-AD/Builder/module-type-Siao/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siao (owl-base.Owl_algodiff_check.Make.AD.Builder.Siao)

Module type Builder.Siao

val label : string
val ff_f : A.elt -> t array
val ff_arr : A.arr -> t array
val df : t array -> t -> t -> t array
val dr : t -> t -> t Stdlib.ref array -> t Stdlib.ref array -> t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_check/Make/argument-1-AD/Builder/module-type-Sipo/index.html b/owl-base/Owl_algodiff_check/Make/argument-1-AD/Builder/module-type-Sipo/index.html deleted file mode 100644 index b495d93a1..000000000 --- a/owl-base/Owl_algodiff_check/Make/argument-1-AD/Builder/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl-base.Owl_algodiff_check.Make.AD.Builder.Sipo)

Module type Builder.Sipo

val label : string
val ff_f : A.elt -> t * t
val ff_arr : A.arr -> t * t
val df : t -> t -> t -> t
val dr : - t -> - t -> - (t Stdlib.ref * t Stdlib.ref) -> - (t Stdlib.ref * t Stdlib.ref) -> - t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_check/Make/argument-1-AD/Builder/module-type-Siso/index.html b/owl-base/Owl_algodiff_check/Make/argument-1-AD/Builder/module-type-Siso/index.html deleted file mode 100644 index 5ac19edf7..000000000 --- a/owl-base/Owl_algodiff_check/Make/argument-1-AD/Builder/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl-base.Owl_algodiff_check.Make.AD.Builder.Siso)

Module type Builder.Siso

val label : string
val ff_f : A.elt -> t
val ff_arr : A.arr -> t
val df : t -> t -> t -> t
val dr : t -> t -> t Stdlib.ref -> t
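An illustrative sketch of supplying a Siso module to Builder.build_siso (documented earlier in this diff) to define a new unary operation. The surrounding context is assumed: an AD module whose A, Maths and Builder are in scope, plus lifting helpers pack_elt, pack_arr and pack_flt, none of which appear on this page. The argument conventions for df (cp, ap, at) and dr (a, cp, ca) are likewise inferred from the signature shapes above rather than stated by the docs.

  (* cube x = x^3, with derivative 3 x^2 *)
  let cube =
    let module Cube = struct
      let label = "cube"
      (* forward evaluation on raw scalars / ndarrays, lifted back to t
         via the assumed pack_elt / pack_arr helpers *)
      let ff_f a = pack_elt A.Scalar.(mul a (mul a a))
      let ff_arr a = pack_arr A.(mul a (mul a a))
      (* tangent rule: output tangent = 3 * ap^2 * at *)
      let df _cp ap at = Maths.(pack_flt 3. * sqr ap * at)
      (* reverse rule: input adjoint = 3 * a^2 * output adjoint *)
      let dr a _cp ca = Maths.(pack_flt 3. * sqr a * !ca)
    end in
    Builder.build_siso (module Cube : Builder.Siso)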
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_check/Make/argument-1-AD/Builder/module-type-Sito/index.html b/owl-base/Owl_algodiff_check/Make/argument-1-AD/Builder/module-type-Sito/index.html deleted file mode 100644 index bed87a578..000000000 --- a/owl-base/Owl_algodiff_check/Make/argument-1-AD/Builder/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl-base.Owl_algodiff_check.Make.AD.Builder.Sito)

Module type Builder.Sito

val label : string
val ff_f : A.elt -> t * t * t
val ff_arr : A.arr -> t * t * t
val df : t -> t -> t -> t
val dr : - t -> - t -> - (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> - (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> - t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_check/Make/argument-1-AD/Linalg/index.html b/owl-base/Owl_algodiff_check/Make/argument-1-AD/Linalg/index.html deleted file mode 100644 index 613ca524c..000000000 --- a/owl-base/Owl_algodiff_check/Make/argument-1-AD/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_algodiff_check.Make.AD.Linalg)

Module AD.Linalg

val inv : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val logdet : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val chol : ?upper:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val qr : t -> t * t

Refer to :doc:`owl_dense_ndarray_generic`

val lq : t -> t * t

Refer to :doc:`owl_dense_ndarray_generic`

val svd : ?thin:bool -> t -> t * t * t

Refer to :doc:`owl_dense_ndarray_generic`

val sylvester : t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val lyapunov : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val discrete_lyapunov : - ?solver:[ `default | `bilinear | `direct ] -> - t -> - t -> - t

Refer to :doc:`owl_dense_ndarray_generic`

val (/@) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val care : ?diag_r:bool -> t -> t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dare : ?diag_r:bool -> t -> t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`
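A short sketch using the Linalg values listed above; AD is assumed to stand for the enclosing AD module, and the residual at the end is built only from Maths operators documented elsewhere in this diff.

  let () =
    let a = AD.Mat.gaussian 4 4 in
    let b = AD.Mat.gaussian 4 1 in
    (* solve a * x = b for x with the general-matrix solver *)
    let x = AD.Linalg.linsolve a b in
    AD.Mat.print x;
    (* residual norm, expected to be close to zero *)
    ignore AD.Maths.(l2norm' ((a *@ x) - b))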

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_check/Make/argument-1-AD/Mat/index.html b/owl-base/Owl_algodiff_check/Make/argument-1-AD/Mat/index.html deleted file mode 100644 index c105ea5fb..000000000 --- a/owl-base/Owl_algodiff_check/Make/argument-1-AD/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_algodiff_check.Make.AD.Mat)

Module AD.Mat

val empty : int -> int -> t
val zeros : int -> int -> t
val eye : int -> t
val ones : int -> int -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int -> int -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int -> int -> t
val shape : t -> int * int
val numel : t -> int
val row_num : t -> int
val col_num : t -> int
val reset : t -> unit
val reshape : int -> int -> t -> t
val get : t -> int -> int -> t
val set : t -> int -> int -> t -> t
val row : t -> int -> t
val mean : t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
val map_by_row : (t -> t) -> t -> t
val of_arrays : A.elt array array -> t
val init_2d : int -> int -> (int -> int -> t) -> t
val print : t -> unit
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_check/Make/argument-1-AD/Maths/index.html b/owl-base/Owl_algodiff_check/Make/argument-1-AD/Maths/index.html deleted file mode 100644 index e5a89392e..000000000 --- a/owl-base/Owl_algodiff_check/Make/argument-1-AD/Maths/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Maths (owl-base.Owl_algodiff_check.Make.AD.Maths)

Module AD.Maths

val (+) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (-) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (*) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (/) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (*@) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (**) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val add : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sub : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val mul : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val div : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val kron : t -> t -> t

Refer to :doc:`owl_dense_matrix_generic`

val dot : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val pow : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atan2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val min2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cross_entropy : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val inv : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val neg : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val abs : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val signum : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val floor : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val ceil : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val round : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sqr : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sqrt : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log2 : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log10 : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val exp : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sin : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cos : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tan : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sinh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cosh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tanh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val asin : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val acos : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atan : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val asinh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val acosh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atanh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp : ?axis:int -> ?keep_dims:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum : ?axis:int -> ?keep_dims:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum_reduce : ?axis:int array -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val mean : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose : ?axis:int array -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val swap : int -> int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l1norm' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm_sqr' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sigmoid : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val relu : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dawsn : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softplus : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softsign : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softmax : ?axis:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val reshape : t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val flatten : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_item : t -> int -> int -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_row : t -> int -> t

Refer to :doc:`owl_dense_ndarray_generic`

val concat : axis:int -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val split : axis:int -> int array -> t -> t array

Refer to :doc:`owl_dense_ndarray_generic`

val of_arrays : t array array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val to_arrays : t -> t array array

Refer to :doc:`owl_dense_ndarray_generic`

val concatenate : axis:int -> t array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val stack : axis:int -> t array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_slice : int list list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val set_slice : int list list -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_fancy : Owl_types.index list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val set_fancy : Owl_types.index list -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val diag : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val diagm : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val trace : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val triu : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tril : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_check/Make/argument-1-AD/NN/index.html b/owl-base/Owl_algodiff_check/Make/argument-1-AD/NN/index.html deleted file mode 100644 index 75ebdf794..000000000 --- a/owl-base/Owl_algodiff_check/Make/argument-1-AD/NN/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -NN (owl-base.Owl_algodiff_check.Make.AD.NN)

Module AD.NN

val dropout : ?rate:float -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool1d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool2d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool3d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool1d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool2d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool3d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val upsampling2d : t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val pad : ?v:A.elt -> int list list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_check/Make/argument-1-AD/index.html b/owl-base/Owl_algodiff_check/Make/argument-1-AD/index.html deleted file mode 100644 index f7adbfccd..000000000 --- a/owl-base/Owl_algodiff_check/Make/argument-1-AD/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -AD (owl-base.Owl_algodiff_check.Make.AD)

Parameter Make.AD

include Owl_algodiff_core_sig.Sig
Type definition
include Owl_algodiff_types_sig.Sig with type elt := A.elt and type arr := A.arr
type t =
  | F of A.elt
  | Arr of A.arr
  | DF of t * t * int
  | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
Core functions
val tag : unit -> int

TODO

val primal : t -> t

TODO

val primal' : t -> t

TODO

val zero : t -> t

TODO

val reset_zero : t -> t

TODO

val tangent : t -> t

TODO

val adjref : t -> t Stdlib.ref

TODO

val adjval : t -> t

TODO

val shape : t -> int array

TODO

val is_float : t -> bool

TODO

val is_arr : t -> bool

TODO

val row_num : t -> int

number of rows

val col_num : t -> int

number of columns

val numel : t -> int

number of elements

val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t

other functions, without tracking gradient

val clip_by_l2norm : A.elt -> t -> t

other functions, without tracking gradient

val copy_primal' : t -> t

TODO

val tile : t -> int array -> t

TODO

val repeat : t -> int array -> t

TODO

val pack_elt : A.elt -> t

convert from elt type to t type.

val unpack_elt : t -> A.elt

convert from t type to elt type.

val pack_flt : float -> t

convert from float type to t type.

val _f : float -> t

A shortcut function for F A.(float_to_elt x).

val unpack_flt : t -> float

convert from t type to float type.

val pack_arr : A.arr -> t

convert from arr type to t type.

val unpack_arr : t -> A.arr

convert from t type to arr type.
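The packing helpers above are how ordinary Owl values cross the AD boundary. A minimal sketch, assuming the double-precision instantiation Owl.Algodiff.D of this interface and the standard Owl.Mat module (neither is defined on this page):

open Owl.Algodiff.D

let () =
  let s = pack_flt 3.14 in                 (* float -> t, i.e. F ...   *)
  let m = pack_arr (Owl.Mat.eye 3) in      (* A.arr -> t, i.e. Arr ... *)
  (* unpack_flt / unpack_arr go the other way *)
  Printf.printf "%g %g\n" (unpack_flt s) (Owl.Mat.trace (unpack_arr m))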

val deep_info : t -> string

TODO

val type_info : t -> string

TODO

val error_binop : string -> t -> t -> 'a

TODO

val error_uniop : string -> t -> 'a

TODO

val make_forward : t -> t -> int -> t

TODO

val make_reverse : t -> int -> t

TODO

val reverse_prop : t -> t -> unit

TODO

val diff : (t -> t) -> t -> t

diff f x returns the exact derivative of a function f : scalar -> scalar at point x. Simply calling diff f will return its derivative function g of the same type, i.e. g : scalar -> scalar.

Calling this function repeatedly gives you higher-order derivatives of f, i.e. f |> diff |> diff |> diff |> ...
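For instance, a short sketch of first- and second-order derivatives, assuming the double-precision instantiation Owl.Algodiff.D of this interface:

open Owl.Algodiff.D

(* f : scalar -> scalar *)
let f x = Maths.(sin x * cos x)

let f'  = diff f              (* first derivative,  scalar -> scalar *)
let f'' = f |> diff |> diff   (* second derivative, scalar -> scalar *)

let () =
  let x = pack_flt 1.0 in
  Printf.printf "f'(1) = %g, f''(1) = %g\n" (unpack_flt (f' x)) (unpack_flt (f'' x))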

val diff' : (t -> t) -> t -> t * t

similar to diff, but return (f x, diff f x).

val grad : (t -> t) -> t -> t

gradient of f : (vector -> scalar) at x, reverse ad.
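A minimal sketch of a reverse-mode gradient, again assuming Owl.Algodiff.D and Owl.Mat for building the input row vector:

open Owl.Algodiff.D

(* f : vector -> scalar, the squared L2 norm *)
let f x = Maths.(sum' (x * x))

let () =
  let x = pack_arr (Owl.Mat.of_array [| 1.; 2.; 3. |] 1 3) in
  let g = grad f x in                      (* gradient is 2x, same shape as x *)
  Owl.Mat.print (unpack_arr g)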

val grad' : (t -> t) -> t -> t * t

similar to grad, but return (f x, grad f x).

val jacobian : (t -> t) -> t -> t

jacobian of f : (vector -> vector) at x; both the input x and the output y = f x are row vectors.
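A short sketch under the same assumptions (Owl.Algodiff.D, inputs packed as 1 x n row vectors):

open Owl.Algodiff.D

(* f : vector -> vector, maps a 1x3 row vector to a 1x6 row vector *)
let f x = Maths.(concat ~axis:1 (sin x) (sqr x))

let () =
  let x = pack_arr (Owl.Mat.of_array [| 0.1; 0.2; 0.3 |] 1 3) in
  Owl.Mat.print (unpack_arr (jacobian f x))   (* full Jacobian of f at x *)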

val jacobian' : (t -> t) -> t -> t * t

similar to jacobian, but return (f x, jacobian f x)

val jacobianv : (t -> t) -> t -> t -> t

jacobian vector product of f : (vector -> vector) at x along v, forward ad. Namely, it calculates (jacobian f x) v.

val jacobianv' : (t -> t) -> t -> t -> t * t

similar to jacobianv, but return (f x, jacobianv f x v)

val jacobianTv : (t -> t) -> t -> t -> t

transposed jacobian vector product of f : (vector -> vector) at x along v, backward ad. Namely, it calculates (transpose (jacobian f x)) v.
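Both products are cheaper than building the full Jacobian: jacobianv pushes a tangent forward (J v), jacobianTv pulls a cotangent back (J^T u). A sketch under the same assumptions (Owl.Algodiff.D); v matches the shape of x, u matches the shape of f x:

open Owl.Algodiff.D

let f x = Maths.(concat ~axis:1 (sin x) (sqr x))

let () =
  let x = pack_arr (Owl.Mat.of_array [| 0.1; 0.2; 0.3 |] 1 3) in
  let v = pack_arr (Owl.Mat.of_array [| 1.; 0.; 0. |] 1 3) in
  let u = pack_arr (Owl.Mat.of_array [| 1.; 0.; 0.; 0.; 0.; 0. |] 1 6) in
  Owl.Mat.print (unpack_arr (jacobianv  f x v));   (* forward mode: J v   *)
  Owl.Mat.print (unpack_arr (jacobianTv f x u))    (* reverse mode: J^T u *)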

val jacobianTv' : (t -> t) -> t -> t -> t * t

similar to jacobianTv, but return (f x, jacobianTv f x v)

val hessian : (t -> t) -> t -> t

hessian of f : (scalar -> scalar) at x.

val hessian' : (t -> t) -> t -> t * t

similar to hessian, but return (f x, hessian f x)

val hessianv : (t -> t) -> t -> t -> t

hessian vector product of f : (scalar -> scalar) at x along v. Namely, it calculates (hessian x) v.
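The signature above is documented for f : (scalar -> scalar), but the Hessian-vector product is most often applied to a vector -> scalar objective; the sketch below (assuming Owl.Algodiff.D) shows that usage, which should be treated as an assumption rather than part of this interface:

open Owl.Algodiff.D

(* objective : vector -> scalar *)
let f x = Maths.(sum' (exp x * x))

let () =
  let x = pack_arr (Owl.Mat.of_array [| 0.5; -0.5 |] 1 2) in
  let v = pack_arr (Owl.Mat.of_array [| 1.0;  0.0 |] 1 2) in
  (* (hessian f x) v without materialising the full Hessian *)
  Owl.Mat.print (unpack_arr (hessianv f x v))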

val hessianv' : (t -> t) -> t -> t -> t * t

similar to hessianv, but return (f x, hessianv f x v).

val laplacian : (t -> t) -> t -> t

laplacian of f : (scalar -> scalar) at x.

val laplacian' : (t -> t) -> t -> t * t

similar to laplacian, but return (f x, laplacian f x).

val gradhessian : (t -> t) -> t -> t * t

return (grad f x, hessian f x), f : (scalar -> scalar)

val gradhessian' : (t -> t) -> t -> t * t * t

return (f x, grad f x, hessian f x)

val gradhessianv : (t -> t) -> t -> t -> t * t

return (grad f x v, hessian f x v)

val gradhessianv' : (t -> t) -> t -> t -> t * t * t

return (f x, grad f x v, hessian f x v)

include Owl_algodiff_ops_sig.Sig with type t := t and type elt := A.elt and type arr := A.arr and type op := op
module Builder : Owl_algodiff_ops_builder_sig.Sig with type t := t and type elt := A.elt and type arr := A.arr and type op := op
Supported Maths functions
module Maths : sig ... end
Supported Linalg functions
module Linalg : sig ... end
Supported Neural Network functions
module NN : sig ... end
Supported Mat functions
module Mat : sig ... end
Supported Arr functions
module Arr : sig ... end
Helper functions
include Owl_algodiff_graph_convert_sig.Sig with type t := t
val to_trace : t list -> string

to_trace [t0; t1; ...] returns the trace of the computation graph as a human-readable string, suitable for printing to the terminal.

val to_dot : t list -> string

to_dot [t0; t1; ...] returns the trace of the computation graph in the dot file format, which you can feed to other tools such as Graphviz for further visualisation.
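A small sketch of dumping a graph, assuming Owl.Algodiff.D; the file name and the Graphviz command are arbitrary choices:

open Owl.Algodiff.D

let () =
  let x = make_reverse (pack_flt 2.) (tag ()) in
  let y = Maths.((x * x) + sin x) in
  print_endline (to_trace [ y ]);
  let oc = open_out "graph.dot" in
  output_string oc (to_dot [ y ]);
  close_out oc
  (* render with e.g.: dot -Tpng graph.dot -o graph.png *)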

val pp_num : Stdlib.Format.formatter -> t -> unit

pp_num t pretty prints the abstract number used in Algodiff.

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_check/Make/index.html b/owl-base/Owl_algodiff_check/Make/index.html deleted file mode 100644 index b1163cfd1..000000000 --- a/owl-base/Owl_algodiff_check/Make/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Make (owl-base.Owl_algodiff_check.Make)

Module Owl_algodiff_check.Make

Parameters

Signature

val generate_test_samples : (int * int) -> int -> AD.t array * AD.t array
module Reverse : sig ... end
module Forward : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_core/.dummy b/owl-base/Owl_algodiff_core/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_algodiff_core/Make/A/Linalg/index.html b/owl-base/Owl_algodiff_core/Make/A/Linalg/index.html deleted file mode 100644 index 10704de9e..000000000 --- a/owl-base/Owl_algodiff_core/Make/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_algodiff_core.Make.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_core/Make/A/Mat/index.html b/owl-base/Owl_algodiff_core/Make/A/Mat/index.html deleted file mode 100644 index 2e8eed362..000000000 --- a/owl-base/Owl_algodiff_core/Make/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_algodiff_core.Make.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_core/Make/A/Scalar/index.html b/owl-base/Owl_algodiff_core/Make/A/Scalar/index.html deleted file mode 100644 index 0d728dc9b..000000000 --- a/owl-base/Owl_algodiff_core/Make/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_algodiff_core.Make.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_core/Make/A/index.html b/owl-base/Owl_algodiff_core/Make/A/index.html deleted file mode 100644 index 1cbd93c25..000000000 --- a/owl-base/Owl_algodiff_core/Make/A/index.html +++ /dev/null @@ -1,160 +0,0 @@ - -A (owl-base.Owl_algodiff_core.Make.A)

Module Make.A

include Owl_types_ndarray_eltcmp.Sig with type arr = A.arr with type elt = A.elt
include Owl_types_ndarray_basic.Sig with type arr = A.arr with type elt = A.elt
type arr = A.arr
type elt = A.elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_core/Make/argument-1-A/Linalg/index.html b/owl-base/Owl_algodiff_core/Make/argument-1-A/Linalg/index.html deleted file mode 100644 index 10704de9e..000000000 --- a/owl-base/Owl_algodiff_core/Make/argument-1-A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_algodiff_core.Make.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_core/Make/argument-1-A/Mat/index.html b/owl-base/Owl_algodiff_core/Make/argument-1-A/Mat/index.html deleted file mode 100644 index 2e8eed362..000000000 --- a/owl-base/Owl_algodiff_core/Make/argument-1-A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_algodiff_core.Make.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_core/Make/argument-1-A/Scalar/index.html b/owl-base/Owl_algodiff_core/Make/argument-1-A/Scalar/index.html deleted file mode 100644 index 0d728dc9b..000000000 --- a/owl-base/Owl_algodiff_core/Make/argument-1-A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_algodiff_core.Make.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_core/Make/argument-1-A/index.html b/owl-base/Owl_algodiff_core/Make/argument-1-A/index.html deleted file mode 100644 index 7829e15fa..000000000 --- a/owl-base/Owl_algodiff_core/Make/argument-1-A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl-base.Owl_algodiff_core.Make.A)

Parameter Make.A

include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_core/Make/index.html b/owl-base/Owl_algodiff_core/Make/index.html deleted file mode 100644 index 37085ca09..000000000 --- a/owl-base/Owl_algodiff_core/Make/index.html +++ /dev/null @@ -1,3 +0,0 @@ - -Make (owl-base.Owl_algodiff_core.Make)

Module Owl_algodiff_core.Make

Parameters

Signature

module A : Owl_types_ndarray_algodiff.Sig with type arr = A.arr with type elt = A.elt
Type definition
include Owl_algodiff_types_sig.Sig with type elt := A.elt and type arr := A.arr
type t =
  | F of A.elt
  | Arr of A.arr
  | DF of t * t * int
  | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
Core functions
val tag : unit -> int

TODO

val primal : t -> t

TODO

val primal' : t -> t

TODO

val zero : t -> t

TODO

val reset_zero : t -> t

TODO

val tangent : t -> t

TODO

val adjref : t -> t Stdlib.ref

TODO

val adjval : t -> t

TODO

val shape : t -> int array

TODO

val is_float : t -> bool

TODO

val is_arr : t -> bool

TODO

val row_num : t -> int

number of rows

val col_num : t -> int

number of columns

val numel : t -> int

number of elements

val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t

other functions, without tracking gradient

val clip_by_l2norm : A.elt -> t -> t

other functions, without tracking gradient

val copy_primal' : t -> t

TODO

val tile : t -> int array -> t

TODO

val repeat : t -> int array -> t

TODO

val pack_elt : A.elt -> t

convert from elt type to t type.

val unpack_elt : t -> A.elt

convert from t type to elt type.

val pack_flt : float -> t

convert from float type to t type.

val _f : float -> t

A shortcut function for F A.(float_to_elt x).

val unpack_flt : t -> float

convert from t type to float type.

val pack_arr : A.arr -> t

convert from arr type to t type.

val unpack_arr : t -> A.arr

convert from t type to arr type.

val deep_info : t -> string

TODO

val type_info : t -> string

TODO

val error_binop : string -> t -> t -> 'a

TODO

val error_uniop : string -> t -> 'a

TODO

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_core_sig/.dummy b/owl-base/Owl_algodiff_core_sig/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_algodiff_core_sig/module-type-Sig/A/Linalg/index.html b/owl-base/Owl_algodiff_core_sig/module-type-Sig/A/Linalg/index.html deleted file mode 100644 index 8d9fe56f4..000000000 --- a/owl-base/Owl_algodiff_core_sig/module-type-Sig/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_algodiff_core_sig.Sig.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_core_sig/module-type-Sig/A/Mat/index.html b/owl-base/Owl_algodiff_core_sig/module-type-Sig/A/Mat/index.html deleted file mode 100644 index 1631b7e32..000000000 --- a/owl-base/Owl_algodiff_core_sig/module-type-Sig/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_algodiff_core_sig.Sig.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_core_sig/module-type-Sig/A/Scalar/index.html b/owl-base/Owl_algodiff_core_sig/module-type-Sig/A/Scalar/index.html deleted file mode 100644 index c8488197b..000000000 --- a/owl-base/Owl_algodiff_core_sig/module-type-Sig/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_algodiff_core_sig.Sig.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_core_sig/module-type-Sig/A/index.html b/owl-base/Owl_algodiff_core_sig/module-type-Sig/A/index.html deleted file mode 100644 index b3ff90f1f..000000000 --- a/owl-base/Owl_algodiff_core_sig/module-type-Sig/A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl-base.Owl_algodiff_core_sig.Sig.A)

Module Sig.A

include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_core_sig/module-type-Sig/index.html b/owl-base/Owl_algodiff_core_sig/module-type-Sig/index.html deleted file mode 100644 index 2c1bdd676..000000000 --- a/owl-base/Owl_algodiff_core_sig/module-type-Sig/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Sig (owl-base.Owl_algodiff_core_sig.Sig)

Module type Owl_algodiff_core_sig.Sig

Type definition
include Owl_algodiff_types_sig.Sig with type elt := A.elt and type arr := A.arr
type t =
  | F of A.elt
  | Arr of A.arr
  | DF of t * t * int
  | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
Core functions
val tag : unit -> int

TODO

val primal : t -> t

TODO

val primal' : t -> t

TODO

val zero : t -> t

TODO

val reset_zero : t -> t

TODO

val tangent : t -> t

TODO

val adjref : t -> t Stdlib.ref

TODO

val adjval : t -> t

TODO

val shape : t -> int array

TODO

val is_float : t -> bool

TODO

val is_arr : t -> bool

TODO

val row_num : t -> int

number of rows

val col_num : t -> int

number of columns

val numel : t -> int

number of elements

val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t

other functions, without tracking gradient

val clip_by_l2norm : A.elt -> t -> t

other functions, without tracking gradient

val copy_primal' : t -> t

TODO

val tile : t -> int array -> t

TODO

val repeat : t -> int array -> t

TODO

val pack_elt : A.elt -> t

convert from elt type to t type.

val unpack_elt : t -> A.elt

convert from t type to elt type.

val pack_flt : float -> t

convert from float type to t type.

val _f : float -> t

A shortcut function for F A.(float_to_elt x).

val unpack_flt : t -> float

convert from t type to float type.

val pack_arr : A.arr -> t

convert from arr type to t type.

val unpack_arr : t -> A.arr

convert from t type to arr type.

val deep_info : t -> string

TODO

val type_info : t -> string

TODO

val error_binop : string -> t -> t -> 'a

TODO

val error_uniop : string -> t -> 'a

TODO

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic/.dummy b/owl-base/Owl_algodiff_generic/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_algodiff_generic/Make/A/Linalg/index.html b/owl-base/Owl_algodiff_generic/Make/A/Linalg/index.html deleted file mode 100644 index 8fae43576..000000000 --- a/owl-base/Owl_algodiff_generic/Make/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_algodiff_generic.Make.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic/Make/A/Mat/index.html b/owl-base/Owl_algodiff_generic/Make/A/Mat/index.html deleted file mode 100644 index d8c2c71b5..000000000 --- a/owl-base/Owl_algodiff_generic/Make/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_algodiff_generic.Make.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic/Make/A/Scalar/index.html b/owl-base/Owl_algodiff_generic/Make/A/Scalar/index.html deleted file mode 100644 index a5560ac64..000000000 --- a/owl-base/Owl_algodiff_generic/Make/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_algodiff_generic.Make.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic/Make/A/index.html b/owl-base/Owl_algodiff_generic/Make/A/index.html deleted file mode 100644 index 976c47b88..000000000 --- a/owl-base/Owl_algodiff_generic/Make/A/index.html +++ /dev/null @@ -1,160 +0,0 @@ - -A (owl-base.Owl_algodiff_generic.Make.A)

Module Make.A

include Owl_types_ndarray_eltcmp.Sig with type arr = A.arr with type elt = A.elt
include Owl_types_ndarray_basic.Sig with type arr = A.arr with type elt = A.elt
type arr = A.arr
type elt = A.elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic/Make/Arr/index.html b/owl-base/Owl_algodiff_generic/Make/Arr/index.html deleted file mode 100644 index e87e36a63..000000000 --- a/owl-base/Owl_algodiff_generic/Make/Arr/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Arr (owl-base.Owl_algodiff_generic.Make.Arr)

Module Make.Arr

val empty : int array -> t
val zeros : int array -> t
val ones : int array -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int array -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int array -> t
val shape : t -> int array
val numel : t -> int
val reset : t -> unit
val reshape : t -> int array -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic/Make/Builder/index.html b/owl-base/Owl_algodiff_generic/Make/Builder/index.html deleted file mode 100644 index 652c037ef..000000000 --- a/owl-base/Owl_algodiff_generic/Make/Builder/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Builder (owl-base.Owl_algodiff_generic.Make.Builder)

Module Make.Builder

Ops Builder
module type Siso = sig ... end
val build_siso : (module Siso) -> t -> t

build single input single output operations

module type Sipo = sig ... end
val build_sipo : (module Sipo) -> t -> t * t

build single input pair outputs operations

module type Sito = sig ... end
val build_sito : (module Sito) -> t -> t * t * t

build single input triple outputs operations

module type Siao = sig ... end
val build_siao : (module Siao) -> t -> t array

build single input array output operations

module type Piso = sig ... end
val build_piso : (module Piso) -> t -> t -> t

build pair inputs single output operations

module type Aiso = sig ... end
val build_aiso : (module Aiso) -> t array -> t

build array input single output operations
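For orientation, here is a minimal, hypothetical sketch of how build_siso is typically used to register a custom unary operation. It assumes AD names an instance of the Make functor (for example Owl.Algodiff.D in the full Owl distribution); the operation name my_sin and its derivative rules are illustrative only.

let my_sin =
  let module S = struct
    let label = "my_sin"                          (* name shown in traces *)
    let ff_f a = AD.F AD.A.Scalar.(sin a)         (* forward pass on a scalar *)
    let ff_arr a = AD.Arr AD.A.(sin a)            (* forward pass on an ndarray *)
    let df _cp ap at = AD.Maths.(at * cos ap)     (* forward-mode tangent rule *)
    let dr a _cp ca = AD.Maths.(!ca * cos a)      (* reverse-mode adjoint rule *)
  end in
  AD.Builder.build_siso (module S : AD.Builder.Siso)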

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic/Make/Builder/module-type-Aiso/index.html b/owl-base/Owl_algodiff_generic/Make/Builder/module-type-Aiso/index.html deleted file mode 100644 index 8675a03ab..000000000 --- a/owl-base/Owl_algodiff_generic/Make/Builder/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl-base.Owl_algodiff_generic.Make.Builder.Aiso)

Module type Builder.Aiso

val label : string
val ff : t array -> t
val df : int list -> t -> t array -> t array -> t
val dr : int list -> t array -> t -> t Stdlib.ref -> t list
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic/Make/Builder/module-type-Piso/index.html b/owl-base/Owl_algodiff_generic/Make/Builder/module-type-Piso/index.html deleted file mode 100644 index 76b671e0c..000000000 --- a/owl-base/Owl_algodiff_generic/Make/Builder/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl-base.Owl_algodiff_generic.Make.Builder.Piso)

Module type Builder.Piso

val label : string
val ff_aa : A.elt -> A.elt -> t
val ff_ab : A.elt -> A.arr -> t
val ff_ba : A.arr -> A.elt -> t
val ff_bb : A.arr -> A.arr -> t
val df_da : t -> t -> t -> t -> t
val df_db : t -> t -> t -> t -> t
val df_dab : t -> t -> t -> t -> t -> t
val dr_ab : t -> t -> t -> t Stdlib.ref -> t * t
val dr_a : t -> t -> t -> t Stdlib.ref -> t
val dr_b : t -> t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic/Make/Builder/module-type-Siao/index.html b/owl-base/Owl_algodiff_generic/Make/Builder/module-type-Siao/index.html deleted file mode 100644 index 5813e0f09..000000000 --- a/owl-base/Owl_algodiff_generic/Make/Builder/module-type-Siao/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siao (owl-base.Owl_algodiff_generic.Make.Builder.Siao)

Module type Builder.Siao

val label : string
val ff_f : A.elt -> t array
val ff_arr : A.arr -> t array
val df : t array -> t -> t -> t array
val dr : t -> t -> t Stdlib.ref array -> t Stdlib.ref array -> t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic/Make/Builder/module-type-Sipo/index.html b/owl-base/Owl_algodiff_generic/Make/Builder/module-type-Sipo/index.html deleted file mode 100644 index db2e52c3c..000000000 --- a/owl-base/Owl_algodiff_generic/Make/Builder/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl-base.Owl_algodiff_generic.Make.Builder.Sipo)

Module type Builder.Sipo

val label : string
val ff_f : A.elt -> t * t
val ff_arr : A.arr -> t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic/Make/Builder/module-type-Siso/index.html b/owl-base/Owl_algodiff_generic/Make/Builder/module-type-Siso/index.html deleted file mode 100644 index d9383c7f3..000000000 --- a/owl-base/Owl_algodiff_generic/Make/Builder/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl-base.Owl_algodiff_generic.Make.Builder.Siso)

Module type Builder.Siso

val label : string
val ff_f : A.elt -> t
val ff_arr : A.arr -> t
val df : t -> t -> t -> t
val dr : t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic/Make/Builder/module-type-Sito/index.html b/owl-base/Owl_algodiff_generic/Make/Builder/module-type-Sito/index.html deleted file mode 100644 index 7473f46e0..000000000 --- a/owl-base/Owl_algodiff_generic/Make/Builder/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl-base.Owl_algodiff_generic.Make.Builder.Sito)

Module type Builder.Sito

val label : string
val ff_f : A.elt -> t * t * t
val ff_arr : A.arr -> t * t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic/Make/Linalg/index.html b/owl-base/Owl_algodiff_generic/Make/Linalg/index.html deleted file mode 100644 index e54f9105e..000000000 --- a/owl-base/Owl_algodiff_generic/Make/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_algodiff_generic.Make.Linalg)

Module Make.Linalg

val inv : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val logdet : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val chol : ?upper:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val qr : t -> t * t

Refer to :doc:`owl_dense_ndarray_generic`

val lq : t -> t * t

Refer to :doc:`owl_dense_ndarray_generic`

val svd : ?thin:bool -> t -> t * t * t

Refer to :doc:`owl_dense_ndarray_generic`

val sylvester : t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val lyapunov : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (/@) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val care : ?diag_r:bool -> t -> t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dare : ?diag_r:bool -> t -> t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`
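A sketch using a couple of the Linalg functions above on AD values, where AD is assumed to be an instance of the Make functor; the matrix is symmetrised so that chol is applicable.

let a = AD.Mat.gaussian 4 4
let s = AD.Maths.(a *@ transpose a + AD.Mat.eye 4)   (* symmetric positive definite *)
let l = AD.Linalg.chol ~upper:false s                (* lower-triangular factor *)
let d = AD.Linalg.logdet s                           (* log-determinant, still of type t *)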

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic/Make/Mat/index.html b/owl-base/Owl_algodiff_generic/Make/Mat/index.html deleted file mode 100644 index 6977c26d8..000000000 --- a/owl-base/Owl_algodiff_generic/Make/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_algodiff_generic.Make.Mat)

Module Make.Mat

val empty : int -> int -> t
val zeros : int -> int -> t
val eye : int -> t
val ones : int -> int -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int -> int -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int -> int -> t
val shape : t -> int * int
val numel : t -> int
val row_num : t -> int
val col_num : t -> int
val reset : t -> unit
val reshape : int -> int -> t -> t
val get : t -> int -> int -> t
val set : t -> int -> int -> t -> t
val row : t -> int -> t
val mean : t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
val map_by_row : (t -> t) -> t -> t
val of_arrays : A.elt array array -> t
val init_2d : int -> int -> (int -> int -> t) -> t
val print : t -> unit
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic/Make/Maths/index.html b/owl-base/Owl_algodiff_generic/Make/Maths/index.html deleted file mode 100644 index 901f53374..000000000 --- a/owl-base/Owl_algodiff_generic/Make/Maths/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Maths (owl-base.Owl_algodiff_generic.Make.Maths)

Module Make.Maths

val (+) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (-) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (*) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (/) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (*@) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (**) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val add : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sub : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val mul : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val div : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val kron : t -> t -> t

Refer to :doc:`owl_dense_matrix_generic`

val dot : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val pow : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atan2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val min2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cross_entropy : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val inv : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val neg : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val abs : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val signum : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val floor : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val ceil : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val round : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sqr : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sqrt : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log2 : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log10 : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val exp : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sin : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cos : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tan : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sinh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cosh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tanh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val asin : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val acos : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atan : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val asinh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val acosh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atanh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp : ?axis:int -> ?keep_dims:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum : ?axis:int -> ?keep_dims:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum_reduce : ?axis:int array -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val mean : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose : ?axis:int array -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val swap : int -> int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l1norm' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm_sqr' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sigmoid : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val relu : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dawsn : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softplus : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softsign : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softmax : ?axis:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val reshape : t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val flatten : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_item : t -> int -> int -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_row : t -> int -> t

Refer to :doc:`owl_dense_ndarray_generic`

val concat : axis:int -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val split : axis:int -> int array -> t -> t array

Refer to :doc:`owl_dense_ndarray_generic`

val of_arrays : t array array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val to_arrays : t -> t array array

Refer to :doc:`owl_dense_ndarray_generic`

val concatenate : axis:int -> t array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val stack : axis:int -> t array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_slice : int list list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val set_slice : int list list -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_fancy : Owl_types.index list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val set_fancy : Owl_types.index list -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val diag : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val diagm : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val trace : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val triu : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tril : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`
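The Maths functions all consume and produce values of type t, so scalar constants and ndarrays mix freely. A small sketch, assuming AD names an instance of the Make functor:

let x = AD.Mat.gaussian 3 3
let y = AD.Maths.(sigmoid ((x *@ x) + AD._f 1.0))   (* matmul, broadcast add, sigmoid *)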

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic/Make/NN/index.html b/owl-base/Owl_algodiff_generic/Make/NN/index.html deleted file mode 100644 index 6e4a3a37b..000000000 --- a/owl-base/Owl_algodiff_generic/Make/NN/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -NN (owl-base.Owl_algodiff_generic.Make.NN)

Module Make.NN

val dropout : ?rate:float -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool1d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool2d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool3d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool1d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool2d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool3d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val upsampling2d : t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val pad : ?v:A.elt -> int list list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`
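A sketch of chaining the NN primitives above, assuming AD names an instance of the Make functor; the NHWC input layout and the SAME padding constructor from Owl_types are assumptions of this example.

let x = AD.Arr.uniform [| 1; 28; 28; 3 |]    (* batch, height, width, channels *)
let k = AD.Arr.uniform [| 3; 3; 3; 8 |]      (* kernel h, w, in-channels, out-channels *)
let y =
  let c = AD.NN.conv2d ~padding:Owl_types.SAME x k [| 1; 1 |] in   (* stride 1x1 *)
  AD.NN.max_pool2d Owl_types.SAME c [| 2; 2 |] [| 2; 2 |]          (* 2x2 window, stride 2 *)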

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic/Make/argument-1-A/Linalg/index.html b/owl-base/Owl_algodiff_generic/Make/argument-1-A/Linalg/index.html deleted file mode 100644 index 8fae43576..000000000 --- a/owl-base/Owl_algodiff_generic/Make/argument-1-A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_algodiff_generic.Make.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic/Make/argument-1-A/Mat/index.html b/owl-base/Owl_algodiff_generic/Make/argument-1-A/Mat/index.html deleted file mode 100644 index d8c2c71b5..000000000 --- a/owl-base/Owl_algodiff_generic/Make/argument-1-A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_algodiff_generic.Make.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic/Make/argument-1-A/Scalar/index.html b/owl-base/Owl_algodiff_generic/Make/argument-1-A/Scalar/index.html deleted file mode 100644 index a5560ac64..000000000 --- a/owl-base/Owl_algodiff_generic/Make/argument-1-A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_algodiff_generic.Make.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic/Make/argument-1-A/index.html b/owl-base/Owl_algodiff_generic/Make/argument-1-A/index.html deleted file mode 100644 index 346256bcf..000000000 --- a/owl-base/Owl_algodiff_generic/Make/argument-1-A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl-base.Owl_algodiff_generic.Make.A)

Parameter Make.A

include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic/Make/index.html b/owl-base/Owl_algodiff_generic/Make/index.html deleted file mode 100644 index 81397d5a7..000000000 --- a/owl-base/Owl_algodiff_generic/Make/index.html +++ /dev/null @@ -1,14 +0,0 @@ - -Make (owl-base.Owl_algodiff_generic.Make)

Module Owl_algodiff_generic.Make

Parameters

Signature

include Owl_algodiff_core_sig.Sig with type A.arr = A.arr with type A.elt = A.elt
module A : Owl_types_ndarray_algodiff.Sig with type arr = A.arr with type elt = A.elt
Type definition
include Owl_algodiff_types_sig.Sig with type elt := A.elt and type arr := A.arr
type t =
  1. | F of A.elt
  2. | Arr of A.arr
  3. | DF of t * t * int
  4. | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
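As a rough illustration (assuming AD names an instance of the Make functor), the four constructors of t can be matched on directly; F and Arr wrap constants, while DF and DR carry the bookkeeping for forward- and reverse-mode differentiation respectively.

let describe = function
  | AD.F _   -> "constant scalar (A.elt)"
  | AD.Arr _ -> "constant ndarray (A.arr)"
  | AD.DF _  -> "forward-mode node: primal, tangent, tag"
  | AD.DR _  -> "reverse-mode node carrying primal, adjoint and op"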
Core functions
val tag : unit -> int

TODO

val primal : t -> t

TODO

val primal' : t -> t

TODO

val zero : t -> t

TODO

val reset_zero : t -> t

TODO

val tangent : t -> t

TODO

val adjref : t -> t Stdlib.ref

TODO

val adjval : t -> t

TODO

val shape : t -> int array

TODO

val is_float : t -> bool

TODO

val is_arr : t -> bool

TODO

val row_num : t -> int

number of rows

val col_num : t -> int

number of columns

val numel : t -> int

number of elements

val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t

other functions, without tracking gradient

val clip_by_l2norm : A.elt -> t -> t

other functions, without tracking gradient

val copy_primal' : t -> t

TODO

val tile : t -> int array -> t

TODO

val repeat : t -> int array -> t

TODO

val pack_elt : A.elt -> t

convert from elt type to t type.

val unpack_elt : t -> A.elt

convert from t type to elt type.

val pack_flt : float -> t

convert from float type to t type.

val _f : float -> t

A shortcut function for F A.(float_to_elt x).

val unpack_flt : t -> float

convert from t type to float type.

val pack_arr : A.arr -> t

convert from arr type to t type.

val unpack_arr : t -> A.arr

convert from t type to arr type.
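A small sketch of the packing functions above, again assuming AD is an instance of the Make functor: plain floats and A.arr values are lifted into t before differentiation and extracted afterwards.

let x = AD.pack_flt 2.0                           (* float -> t *)
let y = AD.Maths.(x * x + AD._f 1.0)              (* all arithmetic stays in t *)
let () = Printf.printf "%g\n" (AD.unpack_flt y)   (* prints 5 *)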

val deep_info : t -> string

TODO

val type_info : t -> string

TODO

val error_binop : string -> t -> t -> 'a

TODO

val error_uniop : string -> t -> 'a

TODO

val make_forward : t -> t -> int -> t

TODO

val make_reverse : t -> int -> t

TODO

val reverse_prop : t -> t -> unit

TODO

val diff : (t -> t) -> t -> t

diff f x returns the exact derivative of a function f : scalar -> scalar at point x. Simply calling diff f will return its derivative function g of the same type, i.e. g : scalar -> scalar.

Calling this function repeatedly gives higher-order derivatives of f, i.e. f |> diff |> diff |> diff |> ...
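For example (a sketch, with AD standing for an instance of the Make functor):

let f x = AD.Maths.(sin x)          (* scalar -> scalar *)
let f'  = AD.diff f                 (* first derivative: cos *)
let f'' = AD.(diff (diff f))        (* second derivative: -sin *)
let () =
  Printf.printf "%g %g\n"
    (AD.unpack_flt (f'  (AD.pack_flt 1.0)))
    (AD.unpack_flt (f'' (AD.pack_flt 1.0)))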

val diff' : (t -> t) -> t -> t * t

similar to diff, but return (f x, diff f x).

val grad : (t -> t) -> t -> t

gradient of f : (vector -> scalar) at x, reverse ad.

val grad' : (t -> t) -> t -> t * t

similar to grad, but return (f x, grad f x).
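A sketch of grad on a vector -> scalar function (AD as above; Mat.uniform is used only to obtain a row vector of type t):

let g x = AD.Maths.(l2norm_sqr' x)      (* g(x) = sum of x_i^2 *)
let x0  = AD.Mat.uniform 1 3            (* 1x3 row vector *)
let gx  = AD.grad g x0                  (* reverse mode; equals 2 * x0 *)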

val jacobian : (t -> t) -> t -> t

jacobian of f : (vector -> vector) at x, both x and y are row vectors.

val jacobian' : (t -> t) -> t -> t * t

similar to jacobian, but return (f x, jacobian f x)

val jacobianv : (t -> t) -> t -> t -> t

jacobian vector product of f : (vector -> vector) at x along v, forward ad. Namely, it calculates (jacobian f x) v.

val jacobianv' : (t -> t) -> t -> t -> t * t

similar to jacobianv, but return (f x, jacobianv f x v)

val jacobianTv : (t -> t) -> t -> t -> t

transposed jacobian vector product of f : (vector -> vector) at x along v, backward ad. Namely, it calculates (transpose (jacobian f x)) v.

val jacobianTv' : (t -> t) -> t -> t -> t * t

similar to jacobianTv, but return (f x, jacobianTv f x v)
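A sketch of the Jacobian functions for a simple linear map (AD as above; w is an arbitrary constant matrix introduced for illustration):

let w  = AD.Mat.uniform 3 2
let h x = AD.Maths.(x *@ w)                  (* row vector in, row vector out *)
let x0 = AD.Mat.uniform 1 3
let v  = AD.Mat.uniform 1 3
let j  = AD.jacobian  h x0                   (* full Jacobian of h at x0 *)
let jv = AD.jacobianv h x0 v                 (* (jacobian h x0) v, forward mode *)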

val hessian : (t -> t) -> t -> t

hessian of f : (scalar -> scalar) at x.

val hessian' : (t -> t) -> t -> t * t

similar to hessian, but return (f x, hessian f x)

val hessianv : (t -> t) -> t -> t -> t

hessian vector product of f : (scalar -> scalar) at x along v. Namely, it calculates (hessian f x) v.

val hessianv' : (t -> t) -> t -> t -> t * t

similar to hessianv, but return (f x, hessianv f x v).
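A sketch of hessian and hessianv on a simple quadratic (AD as above):

let q x = AD.Maths.(sum' (x * x))                  (* q(x) = sum of x_i^2 *)
let x0  = AD.Mat.uniform 1 3
let h   = AD.hessian  q x0                         (* 3x3 matrix, here 2 * identity *)
let hv  = AD.hessianv q x0 (AD.Mat.uniform 1 3)    (* Hessian-vector product *)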

val laplacian : (t -> t) -> t -> t

laplacian of f : (scalar -> scalar) at x.

val laplacian' : (t -> t) -> t -> t * t

similar to laplacian, but return (f x, laplacian f x).

val gradhessian : (t -> t) -> t -> t * t

return (grad f x, hessian f x), f : (scalar -> scalar)

val gradhessian' : (t -> t) -> t -> t * t * t

return (f x, grad f x, hessian f x)

val gradhessianv : (t -> t) -> t -> t -> t * t

return (grad f x v, hessian f x v)

val gradhessianv' : (t -> t) -> t -> t -> t * t * t

return (f x, grad f x v, hessian f x v)
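When both quantities are needed, gradhessian evaluates the gradient and the Hessian together (sketch, AD as above):

let gx, hx = AD.gradhessian (fun x -> AD.Maths.(sum' (x * x))) (AD.Mat.uniform 1 3)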

include Owl_algodiff_ops_sig.Sig with type t := t and type elt := A.elt and type arr := A.arr and type op := op
module Builder : Owl_algodiff_ops_builder_sig.Sig with type t := t and type elt := A.elt and type arr := A.arr and type op := op
Supported Maths functions
module Maths : sig ... end
Supported Linalg functions
module Linalg : sig ... end
Supported Neural Network functions
module NN : sig ... end
Supported Mat functions
module Mat : sig ... end
Supported Arr functions
module Arr : sig ... end
Helper functions
include Owl_algodiff_graph_convert_sig.Sig with type t := t
val to_trace : t list -> string

to_trace [t0; t1; ...] outputs the trace of the computation graph to the terminal in a human-readable format.

val to_dot : t list -> string

to_dot [t0; t1; ...] outputs the trace of the computation graph in the dot file format, which you can feed to other tools such as Graphviz for further visualisation.
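A sketch of dumping a computation graph (AD as above): make_reverse marks x so that the operations applied to it are recorded, and to_dot renders the resulting graph for Graphviz.

let x = AD.make_reverse (AD.Mat.uniform 1 3) (AD.tag ())
let y = AD.Maths.(l2norm_sqr' (sin x))
let () = print_endline (AD.to_dot [ y ])     (* pipe into `dot -Tpdf` to render *)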

val pp_num : Stdlib.Format.formatter -> t -> unit

pp_num t pretty prints the abstract number used in Algodiff.

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic_sig/.dummy b/owl-base/Owl_algodiff_generic_sig/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/A/Linalg/index.html b/owl-base/Owl_algodiff_generic_sig/module-type-Sig/A/Linalg/index.html deleted file mode 100644 index fe3e370c1..000000000 --- a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_algodiff_generic_sig.Sig.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/A/Mat/index.html b/owl-base/Owl_algodiff_generic_sig/module-type-Sig/A/Mat/index.html deleted file mode 100644 index 1b92bb315..000000000 --- a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_algodiff_generic_sig.Sig.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/A/Scalar/index.html b/owl-base/Owl_algodiff_generic_sig/module-type-Sig/A/Scalar/index.html deleted file mode 100644 index b0ad855ba..000000000 --- a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_algodiff_generic_sig.Sig.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/A/index.html b/owl-base/Owl_algodiff_generic_sig/module-type-Sig/A/index.html deleted file mode 100644 index d96dab17f..000000000 --- a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl-base.Owl_algodiff_generic_sig.Sig.A)

Module Sig.A

include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Arr/index.html b/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Arr/index.html deleted file mode 100644 index e6b24aa15..000000000 --- a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Arr/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Arr (owl-base.Owl_algodiff_generic_sig.Sig.Arr)

Module Sig.Arr

val empty : int array -> t
val zeros : int array -> t
val ones : int array -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int array -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int array -> t
val shape : t -> int array
val numel : t -> int
val reset : t -> unit
val reshape : t -> int array -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Builder/index.html b/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Builder/index.html deleted file mode 100644 index a298de65e..000000000 --- a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Builder/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Builder (owl-base.Owl_algodiff_generic_sig.Sig.Builder)

Module Sig.Builder

Ops Builder
module type Siso = sig ... end
val build_siso : (module Siso) -> t -> t

build single input single output operations

module type Sipo = sig ... end
val build_sipo : (module Sipo) -> t -> t * t

build single input pair outputs operations

module type Sito = sig ... end
val build_sito : (module Sito) -> t -> t * t * t

build single input triple outputs operations

module type Siao = sig ... end
val build_siao : (module Siao) -> t -> t array

build single input array output operations

module type Piso = sig ... end
val build_piso : (module Piso) -> t -> t -> t

build pair inputs single output operations

module type Aiso = sig ... end
val build_aiso : (module Aiso) -> t array -> t

build array input single output operations

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Builder/module-type-Aiso/index.html b/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Builder/module-type-Aiso/index.html deleted file mode 100644 index 66b5f8b3a..000000000 --- a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Builder/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl-base.Owl_algodiff_generic_sig.Sig.Builder.Aiso)

Module type Builder.Aiso

val label : string
val ff : t array -> t
val df : int list -> t -> t array -> t array -> t
val dr : int list -> t array -> t -> t Stdlib.ref -> t list
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Builder/module-type-Piso/index.html b/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Builder/module-type-Piso/index.html deleted file mode 100644 index 2253039f2..000000000 --- a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Builder/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl-base.Owl_algodiff_generic_sig.Sig.Builder.Piso)

Module type Builder.Piso

val label : string
val ff_aa : A.elt -> A.elt -> t
val ff_ab : A.elt -> A.arr -> t
val ff_ba : A.arr -> A.elt -> t
val ff_bb : A.arr -> A.arr -> t
val df_da : t -> t -> t -> t -> t
val df_db : t -> t -> t -> t -> t
val df_dab : t -> t -> t -> t -> t -> t
val dr_ab : t -> t -> t -> t Stdlib.ref -> t * t
val dr_a : t -> t -> t -> t Stdlib.ref -> t
val dr_b : t -> t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Builder/module-type-Siao/index.html b/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Builder/module-type-Siao/index.html deleted file mode 100644 index 23a394d9e..000000000 --- a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Builder/module-type-Siao/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siao (owl-base.Owl_algodiff_generic_sig.Sig.Builder.Siao)

Module type Builder.Siao

val label : string
val ff_f : A.elt -> t array
val ff_arr : A.arr -> t array
val df : t array -> t -> t -> t array
val dr : t -> t -> t Stdlib.ref array -> t Stdlib.ref array -> t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Builder/module-type-Sipo/index.html b/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Builder/module-type-Sipo/index.html deleted file mode 100644 index f0f5d57eb..000000000 --- a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Builder/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl-base.Owl_algodiff_generic_sig.Sig.Builder.Sipo)

Module type Builder.Sipo

val label : string
val ff_f : A.elt -> t * t
val ff_arr : A.arr -> t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Builder/module-type-Siso/index.html b/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Builder/module-type-Siso/index.html deleted file mode 100644 index 0098d6b47..000000000 --- a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Builder/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl-base.Owl_algodiff_generic_sig.Sig.Builder.Siso)

Module type Builder.Siso

val label : string
val ff_f : A.elt -> t
val ff_arr : A.arr -> t
val df : t -> t -> t -> t
val dr : t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Builder/module-type-Sito/index.html b/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Builder/module-type-Sito/index.html deleted file mode 100644 index c5fc17dc7..000000000 --- a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Builder/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl-base.Owl_algodiff_generic_sig.Sig.Builder.Sito)

Module type Builder.Sito

val label : string
val ff_f : A.elt -> t * t * t
val ff_arr : A.arr -> t * t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Linalg/index.html b/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Linalg/index.html deleted file mode 100644 index 773be344c..000000000 --- a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_algodiff_generic_sig.Sig.Linalg)

Module Sig.Linalg

val inv : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val logdet : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val chol : ?upper:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val qr : t -> t * t

Refer to :doc:`owl_dense_ndarray_generic`

val lq : t -> t * t

Refer to :doc:`owl_dense_ndarray_generic`

val svd : ?thin:bool -> t -> t * t * t

Refer to :doc:`owl_dense_ndarray_generic`

val sylvester : t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val lyapunov : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (/@) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val care : ?diag_r:bool -> t -> t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dare : ?diag_r:bool -> t -> t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Mat/index.html b/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Mat/index.html deleted file mode 100644 index 6e5d83735..000000000 --- a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_algodiff_generic_sig.Sig.Mat)

Module Sig.Mat

val empty : int -> int -> t
val zeros : int -> int -> t
val eye : int -> t
val ones : int -> int -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int -> int -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int -> int -> t
val shape : t -> int * int
val numel : t -> int
val row_num : t -> int
val col_num : t -> int
val reset : t -> unit
val reshape : int -> int -> t -> t
val get : t -> int -> int -> t
val set : t -> int -> int -> t -> t
val row : t -> int -> t
val mean : t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
val map_by_row : (t -> t) -> t -> t
val of_arrays : A.elt array array -> t
val init_2d : int -> int -> (int -> int -> t) -> t
val print : t -> unit
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Maths/index.html b/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Maths/index.html deleted file mode 100644 index b73e8a67a..000000000 --- a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/Maths/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Maths (owl-base.Owl_algodiff_generic_sig.Sig.Maths)

Module Sig.Maths

val (+) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (-) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (*) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (/) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (*@) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (**) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val add : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sub : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val mul : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val div : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val kron : t -> t -> t

Refer to :doc:`owl_dense_matrix_generic`

val dot : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val pow : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atan2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val min2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cross_entropy : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val inv : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val neg : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val abs : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val signum : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val floor : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val ceil : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val round : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sqr : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sqrt : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log2 : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log10 : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val exp : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sin : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cos : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tan : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sinh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cosh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tanh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val asin : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val acos : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atan : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val asinh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val acosh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atanh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp : ?axis:int -> ?keep_dims:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum : ?axis:int -> ?keep_dims:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum_reduce : ?axis:int array -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val mean : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose : ?axis:int array -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val swap : int -> int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l1norm' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm_sqr' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sigmoid : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val relu : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dawsn : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softplus : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softsign : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softmax : ?axis:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val reshape : t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val flatten : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_item : t -> int -> int -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_row : t -> int -> t

Refer to :doc:`owl_dense_ndarray_generic`

val concat : axis:int -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val split : axis:int -> int array -> t -> t array

Refer to :doc:`owl_dense_ndarray_generic`

val of_arrays : t array array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val to_arrays : t -> t array array

Refer to :doc:`owl_dense_ndarray_generic`

val concatenate : axis:int -> t array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val stack : axis:int -> t array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_slice : int list list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val set_slice : int list list -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_fancy : Owl_types.index list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val set_fancy : Owl_types.index list -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val diag : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val diagm : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val trace : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val triu : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tril : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/NN/index.html b/owl-base/Owl_algodiff_generic_sig/module-type-Sig/NN/index.html deleted file mode 100644 index ed70959a2..000000000 --- a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/NN/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -NN (owl-base.Owl_algodiff_generic_sig.Sig.NN)

Module Sig.NN

val dropout : ?rate:float -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool1d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool2d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool3d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool1d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool2d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool3d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val upsampling2d : t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val pad : ?v:A.elt -> int list list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/index.html b/owl-base/Owl_algodiff_generic_sig/module-type-Sig/index.html deleted file mode 100644 index 860ba545a..000000000 --- a/owl-base/Owl_algodiff_generic_sig/module-type-Sig/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -Sig (owl-base.Owl_algodiff_generic_sig.Sig)

Module type Owl_algodiff_generic_sig.Sig

include Owl_algodiff_core_sig.Sig
Type definition
include Owl_algodiff_types_sig.Sig with type elt := A.elt and type arr := A.arr
type t =
  1. | F of A.elt
  2. | Arr of A.arr
  3. | DF of t * t * int
  4. | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
Core functions
val tag : unit -> int

TODO

val primal : t -> t

TODO

val primal' : t -> t

TODO

val zero : t -> t

TODO

val reset_zero : t -> t

TODO

val tangent : t -> t

TODO

val adjref : t -> t Stdlib.ref

TODO

val adjval : t -> t

TODO

val shape : t -> int array

TODO

val is_float : t -> bool

TODO

val is_arr : t -> bool

TODO

val row_num : t -> int

number of rows

val col_num : t -> int

number of columns

val numel : t -> int

number of elements

val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t

other functions, without tracking gradient

val clip_by_l2norm : A.elt -> t -> t

other functions, without tracking gradient

val copy_primal' : t -> t

TODO

val tile : t -> int array -> t

TODO

val repeat : t -> int array -> t

TODO

val pack_elt : A.elt -> t

convert from elt type to t type.

val unpack_elt : t -> A.elt

convert from t type to elt type.

val pack_flt : float -> t

convert from float type to t type.

val _f : float -> t

A shortcut function for F A.(float_to_elt x).

val unpack_flt : t -> float

convert from t type to float type.

val pack_arr : A.arr -> t

convert from arr type to t type.

val unpack_arr : t -> A.arr

convert from t type to arr type.
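
A minimal usage sketch of these conversions (assuming the concrete double-precision instance Owl.Algodiff.D from the main owl package as an implementation of this Sig; any other instance works the same way):

  module AD = Owl.Algodiff.D                          (* assumed concrete instance of this Sig *)
  let s = AD.pack_flt 2.5                             (* float -> t; same as AD._f 2.5 *)
  let x = AD.pack_arr (Owl.Arr.uniform [| 3; 3 |])    (* underlying A.arr -> t *)
  let y = AD.Maths.(s * x)                            (* compute with the packed values *)
  let a = AD.unpack_arr y                             (* t -> A.arr *)
  let f = AD.unpack_flt (AD.Maths.sum' y)             (* t -> float, via a scalar result *)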

val deep_info : t -> string

TODO

val type_info : t -> string

TODO

val error_binop : string -> t -> t -> 'a

TODO

val error_uniop : string -> t -> 'a

TODO

val make_forward : t -> t -> int -> t

TODO

val make_reverse : t -> int -> t

TODO

val reverse_prop : t -> t -> unit

TODO

val diff : (t -> t) -> t -> t

diff f x returns the exact derivative of a function f : scalar -> scalar at point x. Simply calling diff f returns its derivative function g of the same type, i.e. g : scalar -> scalar.

Calling this function repeatedly gives you higher-order derivatives of f, i.e. f |> diff |> diff |> diff |> ...
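
For instance (a hedged sketch, again assuming Owl.Algodiff.D as the concrete instance; the function f below is chosen only for illustration):

  module AD = Owl.Algodiff.D                 (* assumed concrete instance of this Sig *)
  let f x = AD.Maths.(sin x * sqr x)         (* f : scalar -> scalar *)
  let f'  = AD.diff f                        (* first derivative, scalar -> scalar *)
  let f'' = AD.(f |> diff |> diff)           (* second derivative *)
  let () =
    Printf.printf "%g %g\n"
      (AD.unpack_flt (f'  (AD._f 1.)))
      (AD.unpack_flt (f'' (AD._f 1.)))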

val diff' : (t -> t) -> t -> t * t

similar to diff, but return (f x, diff f x).

val grad : (t -> t) -> t -> t

gradient of f : (vector -> scalar) at x, reverse ad.

val grad' : (t -> t) -> t -> t * t

similar to grad, but return (f x, grad f x).
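
A short sketch of grad and grad' (assuming Owl.Algodiff.D; the loss function below is illustrative only):

  module AD = Owl.Algodiff.D                 (* assumed concrete instance of this Sig *)
  let f x = AD.Maths.(l2norm_sqr' (sin x))   (* f : row vector -> scalar *)
  let x = AD.Mat.uniform 1 5
  let g = AD.grad f x                        (* gradient, same shape as x *)
  let v, g' = AD.grad' f x                   (* (f x, grad f x) *)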

val jacobian : (t -> t) -> t -> t

jacobian of f : (vector -> vector) at x, both x and y are row vectors.

val jacobian' : (t -> t) -> t -> t * t

similar to jacobian, but return (f x, jacobian f x)

val jacobianv : (t -> t) -> t -> t -> t

jacobian vector product of f : (vector -> vector) at x along v, forward ad. Namely, it calculates (jacobian f x) v.

val jacobianv' : (t -> t) -> t -> t -> t * t

similar to jacobianv, but return (f x, jacobianv f x v).

val jacobianTv : (t -> t) -> t -> t -> t

transposed jacobian vector product of f : (vector -> vector) at x along v, backward ad. Namely, it calculates transpose (jacobian f x) v.

val jacobianTv' : (t -> t) -> t -> t -> t * t

similar to jacobianTv, but return (f x, jacobianTv f x v).
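
A sketch contrasting the two products (assuming Owl.Algodiff.D; the map f and the shapes are illustrative):

  module AD = Owl.Algodiff.D                     (* assumed concrete instance of this Sig *)
  let w = AD.Mat.gaussian 3 4
  let f x = AD.Maths.(tanh (x *@ w))             (* f : 1x3 row vector -> 1x4 row vector *)
  let x = AD.Mat.gaussian 1 3
  let jv  = AD.jacobianv  f x (AD.Mat.ones 1 3)  (* jacobian-vector product along a tangent, forward ad *)
  let jtv = AD.jacobianTv f x (AD.Mat.ones 1 4)  (* transposed-jacobian-vector product along a cotangent, reverse ad *)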

val hessian : (t -> t) -> t -> t

hessian of f : (scalar -> scalar) at x.

val hessian' : (t -> t) -> t -> t * t

similar to hessian, but return (f x, hessian f x).

val hessianv : (t -> t) -> t -> t -> t

hessian vector product of f : (scalar -> scalar) at x along v. Namely, it calculates (hessian f x) v.

val hessianv' : (t -> t) -> t -> t -> t * t

similar to hessianv, but return (f x, hessianv f x v).
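
A hedged sketch of hessianv (assuming Owl.Algodiff.D; here the illustrative f maps a row vector to a scalar so that the product with v is well defined):

  module AD = Owl.Algodiff.D                 (* assumed concrete instance of this Sig *)
  let f x = AD.Maths.(sum' (x * x * x))      (* illustrative: row vector -> scalar *)
  let x = AD.Mat.uniform 1 5
  let v = AD.Mat.ones 1 5
  let hv = AD.hessianv f x v                 (* (hessian f x) applied to v, without forming the full hessian *)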

val laplacian : (t -> t) -> t -> t

laplacian of f : (scalar -> scalar) at x.

val laplacian' : (t -> t) -> t -> t * t

similar to laplacian, but return (f x, laplacian f x).

val gradhessian : (t -> t) -> t -> t * t

return (grad f x, hessian f x), f : (scalar -> scalar)

val gradhessian' : (t -> t) -> t -> t * t * t

return (f x, grad f x, hessian f x)

val gradhessianv : (t -> t) -> t -> t -> t * t

return (grad f x v, hessian f x v)

val gradhessianv' : (t -> t) -> t -> t -> t * t * t

return (f x, grad f x v, hessian f x v)

include Owl_algodiff_ops_sig.Sig with type t := t and type elt := A.elt and type arr := A.arr and type op := op
module Builder : Owl_algodiff_ops_builder_sig.Sig with type t := t and type elt := A.elt and type arr := A.arr and type op := op
Supported Maths functions
module Maths : sig ... end
Supported Linalg functions
module Linalg : sig ... end
Supported Neural Network functions
module NN : sig ... end
Supported Mat functions
module Mat : sig ... end
Supported Arr functions
module Arr : sig ... end
Helper functions
include Owl_algodiff_graph_convert_sig.Sig with type t := t
val to_trace : t list -> string

to_trace [t0; t1; ...] outputs the trace of the computation graph to the terminal in a human-readable format.

val to_dot : t list -> string

to_dot [t0; t1; ...] outputs the trace of the computation graph in the dot file format, which you can then feed to other tools, such as Graphviz, for further visualisation.
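
A hedged sketch of dumping a reverse-mode graph to a dot file (assuming Owl.Algodiff.D; the computation and the file name are illustrative):

  module AD = Owl.Algodiff.D                 (* assumed concrete instance of this Sig *)
  let x = AD.make_reverse (AD.Mat.gaussian 1 3) (AD.tag ())
  let y = AD.Maths.(sum' (sigmoid (x *@ AD.Mat.gaussian 3 3)))
  let () =
    let oc = open_out "graph.dot" in         (* render later with e.g. `dot -Tpdf graph.dot` *)
    output_string oc (AD.to_dot [ y ]);
    close_out oc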

val pp_num : Stdlib.Format.formatter -> t -> unit

pp_num t pretty prints the abstract number used in Algodiff.

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_graph_convert/.dummy b/owl-base/Owl_algodiff_graph_convert/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_algodiff_graph_convert/Make/argument-1-Core/A/Linalg/index.html b/owl-base/Owl_algodiff_graph_convert/Make/argument-1-Core/A/Linalg/index.html deleted file mode 100644 index ed429f461..000000000 --- a/owl-base/Owl_algodiff_graph_convert/Make/argument-1-Core/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_algodiff_graph_convert.Make.Core.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_graph_convert/Make/argument-1-Core/A/Mat/index.html b/owl-base/Owl_algodiff_graph_convert/Make/argument-1-Core/A/Mat/index.html deleted file mode 100644 index f94c292da..000000000 --- a/owl-base/Owl_algodiff_graph_convert/Make/argument-1-Core/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_algodiff_graph_convert.Make.Core.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_graph_convert/Make/argument-1-Core/A/Scalar/index.html b/owl-base/Owl_algodiff_graph_convert/Make/argument-1-Core/A/Scalar/index.html deleted file mode 100644 index f99213a05..000000000 --- a/owl-base/Owl_algodiff_graph_convert/Make/argument-1-Core/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_algodiff_graph_convert.Make.Core.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_graph_convert/Make/argument-1-Core/A/index.html b/owl-base/Owl_algodiff_graph_convert/Make/argument-1-Core/A/index.html deleted file mode 100644 index 9e5e67f69..000000000 --- a/owl-base/Owl_algodiff_graph_convert/Make/argument-1-Core/A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl-base.Owl_algodiff_graph_convert.Make.Core.A)

Module Core.A

include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_graph_convert/Make/argument-1-Core/index.html b/owl-base/Owl_algodiff_graph_convert/Make/argument-1-Core/index.html deleted file mode 100644 index e7441c68f..000000000 --- a/owl-base/Owl_algodiff_graph_convert/Make/argument-1-Core/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Core (owl-base.Owl_algodiff_graph_convert.Make.Core)

Parameter Make.Core

Type definition
include Owl_algodiff_types_sig.Sig with type elt := A.elt and type arr := A.arr
type t =
  1. | F of A.elt
  2. | Arr of A.arr
  3. | DF of t * t * int
  4. | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
Core functions
val tag : unit -> int

TODO

val primal : t -> t

TODO

val primal' : t -> t

TODO

val zero : t -> t

TODO

val reset_zero : t -> t

TODO

val tangent : t -> t

TODO

val adjref : t -> t Stdlib.ref

TODO

val adjval : t -> t

TODO

val shape : t -> int array

TODO

val is_float : t -> bool

TODO

val is_arr : t -> bool

TODO

val row_num : t -> int

number of rows

val col_num : t -> int

number of columns

val numel : t -> int

number of elements

val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t

other functions, without tracking gradient

val clip_by_l2norm : A.elt -> t -> t

other functions, without tracking gradient

val copy_primal' : t -> t

TODO

val tile : t -> int array -> t

TODO

val repeat : t -> int array -> t

TODO

val pack_elt : A.elt -> t

convert from elt type to t type.

val unpack_elt : t -> A.elt

convert from t type to elt type.

val pack_flt : float -> t

convert from float type to t type.

val _f : float -> t

A shortcut function for F A.(float_to_elt x).

val unpack_flt : t -> float

convert from t type to float type.

val pack_arr : A.arr -> t

convert from arr type to t type.

val unpack_arr : t -> A.arr

convert from t type to arr type.

val deep_info : t -> string

TODO

val type_info : t -> string

TODO

val error_binop : string -> t -> t -> 'a

TODO

val error_uniop : string -> t -> 'a

TODO

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_graph_convert/Make/index.html b/owl-base/Owl_algodiff_graph_convert/Make/index.html deleted file mode 100644 index a64faa2bd..000000000 --- a/owl-base/Owl_algodiff_graph_convert/Make/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Make (owl-base.Owl_algodiff_graph_convert.Make)

Module Owl_algodiff_graph_convert.Make

Parameters

Signature

val to_trace : Core.t list -> string

to_trace [t0; t1; ...] outputs the trace of the computation graph to the terminal in a human-readable format.

val to_dot : Core.t list -> string

to_dot [t0; t1; ...] outputs the trace of the computation graph in the dot file format, which you can then feed to other tools, such as Graphviz, for further visualisation.

val pp_num : Stdlib.Format.formatter -> Core.t -> unit

pp_num t pretty prints the abstract number used in Algodiff.

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_graph_convert_sig/.dummy b/owl-base/Owl_algodiff_graph_convert_sig/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_algodiff_graph_convert_sig/module-type-Sig/index.html b/owl-base/Owl_algodiff_graph_convert_sig/module-type-Sig/index.html deleted file mode 100644 index fec25de5a..000000000 --- a/owl-base/Owl_algodiff_graph_convert_sig/module-type-Sig/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Sig (owl-base.Owl_algodiff_graph_convert_sig.Sig)

Module type Owl_algodiff_graph_convert_sig.Sig

type t
val to_trace : t list -> string

to_trace [t0; t1; ...] outputs the trace of the computation graph to the terminal in a human-readable format.

val to_dot : t list -> string

to_dot [t0; t1; ...] outputs the trace of the computation graph in the dot file format, which you can then feed to other tools, such as Graphviz, for further visualisation.

val pp_num : Stdlib.Format.formatter -> t -> unit

pp_num t pretty prints the abstract number used in Algodiff.

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops/.dummy b/owl-base/Owl_algodiff_ops/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_algodiff_ops/Make/Arr/index.html b/owl-base/Owl_algodiff_ops/Make/Arr/index.html deleted file mode 100644 index 3b775a973..000000000 --- a/owl-base/Owl_algodiff_ops/Make/Arr/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Arr (owl-base.Owl_algodiff_ops.Make.Arr)

Module Make.Arr

val empty : int array -> Core.t
val zeros : int array -> Core.t
val ones : int array -> Core.t
val uniform : ?a:Core.A.elt -> ?b:Core.A.elt -> int array -> Core.t
val gaussian : ?mu:Core.A.elt -> ?sigma:Core.A.elt -> int array -> Core.t
val shape : Core.t -> int array
val numel : Core.t -> int
val reset : Core.t -> unit
val reshape : Core.t -> int array -> Core.t
val add : Core.t -> Core.t -> Core.t
val sub : Core.t -> Core.t -> Core.t
val mul : Core.t -> Core.t -> Core.t
val div : Core.t -> Core.t -> Core.t
val dot : Core.t -> Core.t -> Core.t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops/Make/Builder/index.html b/owl-base/Owl_algodiff_ops/Make/Builder/index.html deleted file mode 100644 index 33be8bb68..000000000 --- a/owl-base/Owl_algodiff_ops/Make/Builder/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Builder (owl-base.Owl_algodiff_ops.Make.Builder)

Module Make.Builder

Ops Builder
module type Siso = sig ... end
val build_siso : (module Siso) -> Core.t -> Core.t

build single input single output operations

module type Sipo = sig ... end
val build_sipo : (module Sipo) -> Core.t -> Core.t * Core.t

build single input pair outputs operations

module type Sito = sig ... end
val build_sito : (module Sito) -> Core.t -> Core.t * Core.t * Core.t

build single input triple outputs operations

module type Siao = sig ... end
val build_siao : (module Siao) -> Core.t -> Core.t array

build single input array output operations

module type Piso = sig ... end
val build_piso : (module Piso) -> Core.t -> Core.t -> Core.t

build pair inputs single output operations

module type Aiso = sig ... end
val build_aiso : (module Aiso) -> Core.t array -> Core.t

build array input single output operations
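
As an illustration of build_siso and the Siso interface above, a hedged sketch (assuming the concrete instance Owl.Algodiff.D from the main owl package; the op, its label and its derivative rules are made up for the example):

  module AD = Owl.Algodiff.D                 (* assumed concrete instance *)
  open AD

  (* x |-> x^3 as a new primitive, with hand-written forward (df) and reverse (dr) rules *)
  let cube =
    Builder.build_siso
      (module struct
        let label = "cube"
        let ff_f a = F A.Scalar.(mul a (mul a a))
        let ff_arr a = Arr A.(mul a (mul a a))
        let df _cp ap at = Maths.(_f 3. * sqr ap * at)
        let dr a _cp ca = Maths.(!ca * _f 3. * sqr (primal a))
      end : Builder.Siso)

  (* cube now composes with the rest of Algodiff, e.g. *)
  let d = diff cube (_f 2.)                  (* expect 3 * 2^2 = 12 *)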

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops/Make/Builder/module-type-Aiso/index.html b/owl-base/Owl_algodiff_ops/Make/Builder/module-type-Aiso/index.html deleted file mode 100644 index 6069df4de..000000000 --- a/owl-base/Owl_algodiff_ops/Make/Builder/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl-base.Owl_algodiff_ops.Make.Builder.Aiso)

Module type Builder.Aiso

val label : string
val ff : Core.t array -> Core.t
val df : int list -> Core.t -> Core.t array -> Core.t array -> Core.t
val dr : int list -> Core.t array -> Core.t -> Core.t Stdlib.ref -> Core.t list
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops/Make/Builder/module-type-Piso/index.html b/owl-base/Owl_algodiff_ops/Make/Builder/module-type-Piso/index.html deleted file mode 100644 index 420fae291..000000000 --- a/owl-base/Owl_algodiff_ops/Make/Builder/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl-base.Owl_algodiff_ops.Make.Builder.Piso)

Module type Builder.Piso

val label : string
val ff_aa : Core.A.elt -> Core.A.elt -> Core.t
val ff_ab : Core.A.elt -> Core.A.arr -> Core.t
val ff_ba : Core.A.arr -> Core.A.elt -> Core.t
val ff_bb : Core.A.arr -> Core.A.arr -> Core.t
val df_da : Core.t -> Core.t -> Core.t -> Core.t -> Core.t
val df_db : Core.t -> Core.t -> Core.t -> Core.t -> Core.t
val df_dab : Core.t -> Core.t -> Core.t -> Core.t -> Core.t -> Core.t
val dr_ab : Core.t -> Core.t -> Core.t -> Core.t Stdlib.ref -> Core.t * Core.t
val dr_a : Core.t -> Core.t -> Core.t -> Core.t Stdlib.ref -> Core.t
val dr_b : Core.t -> Core.t -> Core.t -> Core.t Stdlib.ref -> Core.t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops/Make/Builder/module-type-Siao/index.html b/owl-base/Owl_algodiff_ops/Make/Builder/module-type-Siao/index.html deleted file mode 100644 index 2538033d0..000000000 --- a/owl-base/Owl_algodiff_ops/Make/Builder/module-type-Siao/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Siao (owl-base.Owl_algodiff_ops.Make.Builder.Siao)

Module type Builder.Siao

val label : string
val ff_f : Core.A.elt -> Core.t array
val ff_arr : Core.A.arr -> Core.t array
val df : Core.t array -> Core.t -> Core.t -> Core.t array
val dr : Core.t -> Core.t -> Core.t Stdlib.ref array -> Core.t Stdlib.ref array -> Core.t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops/Make/Builder/module-type-Sipo/index.html b/owl-base/Owl_algodiff_ops/Make/Builder/module-type-Sipo/index.html deleted file mode 100644 index 5c1e4f836..000000000 --- a/owl-base/Owl_algodiff_ops/Make/Builder/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl-base.Owl_algodiff_ops.Make.Builder.Sipo)

Module type Builder.Sipo

val label : string
val ff_f : Core.A.elt -> Core.t * Core.t
val ff_arr : Core.A.arr -> Core.t * Core.t
val df : Core.t -> Core.t -> Core.t -> Core.t
val dr : Core.t -> Core.t -> (Core.t Stdlib.ref * Core.t Stdlib.ref) -> (Core.t Stdlib.ref * Core.t Stdlib.ref) -> Core.t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops/Make/Builder/module-type-Siso/index.html b/owl-base/Owl_algodiff_ops/Make/Builder/module-type-Siso/index.html deleted file mode 100644 index a0f877a7d..000000000 --- a/owl-base/Owl_algodiff_ops/Make/Builder/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl-base.Owl_algodiff_ops.Make.Builder.Siso)

Module type Builder.Siso

val label : string
val ff_f : Core.A.elt -> Core.t
val ff_arr : Core.A.arr -> Core.t
val df : Core.t -> Core.t -> Core.t -> Core.t
val dr : Core.t -> Core.t -> Core.t Stdlib.ref -> Core.t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops/Make/Builder/module-type-Sito/index.html b/owl-base/Owl_algodiff_ops/Make/Builder/module-type-Sito/index.html deleted file mode 100644 index 17039bec4..000000000 --- a/owl-base/Owl_algodiff_ops/Make/Builder/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl-base.Owl_algodiff_ops.Make.Builder.Sito)

Module type Builder.Sito

val label : string
val ff_f : Core.A.elt -> Core.t * Core.t * Core.t
val ff_arr : Core.A.arr -> Core.t * Core.t * Core.t
val df : Core.t -> Core.t -> Core.t -> Core.t
val dr : Core.t -> Core.t -> (Core.t Stdlib.ref * Core.t Stdlib.ref * Core.t Stdlib.ref) -> (Core.t Stdlib.ref * Core.t Stdlib.ref * Core.t Stdlib.ref) -> Core.t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops/Make/Linalg/index.html b/owl-base/Owl_algodiff_ops/Make/Linalg/index.html deleted file mode 100644 index 290ea4263..000000000 --- a/owl-base/Owl_algodiff_ops/Make/Linalg/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -Linalg (owl-base.Owl_algodiff_ops.Make.Linalg)

Module Make.Linalg

val inv : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val logdet : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val chol : ?upper:bool -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val qr : Core.t -> Core.t * Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val lq : Core.t -> Core.t * Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val svd : ?thin:bool -> Core.t -> Core.t * Core.t * Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val sylvester : Core.t -> Core.t -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val lyapunov : Core.t -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> Core.t -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val (/@) : Core.t -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> Core.t -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val care : ?diag_r:bool -> Core.t -> Core.t -> Core.t -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val dare : ?diag_r:bool -> Core.t -> Core.t -> Core.t -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops/Make/Mat/index.html b/owl-base/Owl_algodiff_ops/Make/Mat/index.html deleted file mode 100644 index e6e21be4a..000000000 --- a/owl-base/Owl_algodiff_ops/Make/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_algodiff_ops.Make.Mat)

Module Make.Mat

val empty : int -> int -> Core.t
val zeros : int -> int -> Core.t
val eye : int -> Core.t
val ones : int -> int -> Core.t
val uniform : ?a:Core.A.elt -> ?b:Core.A.elt -> int -> int -> Core.t
val gaussian : ?mu:Core.A.elt -> ?sigma:Core.A.elt -> int -> int -> Core.t
val shape : Core.t -> int * int
val numel : Core.t -> int
val row_num : Core.t -> int
val col_num : Core.t -> int
val reset : Core.t -> unit
val reshape : int -> int -> Core.t -> Core.t
val get : Core.t -> int -> int -> Core.t
val set : Core.t -> int -> int -> Core.t -> Core.t
val row : Core.t -> int -> Core.t
val mean : Core.t -> Core.t
val add : Core.t -> Core.t -> Core.t
val sub : Core.t -> Core.t -> Core.t
val mul : Core.t -> Core.t -> Core.t
val div : Core.t -> Core.t -> Core.t
val dot : Core.t -> Core.t -> Core.t
val map_by_row : (Core.t -> Core.t) -> Core.t -> Core.t
val of_arrays : Core.A.elt array array -> Core.t
val init_2d : int -> int -> (int -> int -> Core.t) -> Core.t
val print : Core.t -> unit
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops/Make/Maths/index.html b/owl-base/Owl_algodiff_ops/Make/Maths/index.html deleted file mode 100644 index 2cbc79de6..000000000 --- a/owl-base/Owl_algodiff_ops/Make/Maths/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Maths (owl-base.Owl_algodiff_ops.Make.Maths)

Module Make.Maths

val (+) : Core.t -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val (-) : Core.t -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val (*) : Core.t -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val (/) : Core.t -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val (*@) : Core.t -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val (**) : Core.t -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val add : Core.t -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val sub : Core.t -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val mul : Core.t -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val div : Core.t -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val kron : Core.t -> Core.t -> Core.t

Refer to :doc:`owl_dense_matrix_generic`

val dot : Core.t -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val pow : Core.t -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val atan2 : Core.t -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val min2 : Core.t -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val max2 : Core.t -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val cross_entropy : Core.t -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val inv : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val neg : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val abs : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val signum : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val floor : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val ceil : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val round : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val sqr : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val sqrt : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val log : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val log2 : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val log10 : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val exp : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val sin : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val cos : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val tan : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val sinh : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val cosh : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val tanh : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val asin : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val acos : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val atan : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val asinh : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val acosh : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val atanh : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val sum' : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp' : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp : ?axis:int -> ?keep_dims:bool -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val sum : ?axis:int -> ?keep_dims:bool -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val sum_reduce : ?axis:int array -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val mean : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose : ?axis:int array -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val swap : int -> int -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val l1norm' : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm' : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm_sqr' : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val sigmoid : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val relu : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val dawsn : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val softplus : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val softsign : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val softmax : ?axis:int -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val reshape : Core.t -> int array -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val flatten : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val get_item : Core.t -> int -> int -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val get_row : Core.t -> int -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val concat : axis:int -> Core.t -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val split : axis:int -> int array -> Core.t -> Core.t array

Refer to :doc:`owl_dense_ndarray_generic`

val of_arrays : Core.t array array -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val to_arrays : Core.t -> Core.t array array

Refer to :doc:`owl_dense_ndarray_generic`

val concatenate : axis:int -> Core.t array -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val stack : axis:int -> Core.t array -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val get_slice : int list list -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val set_slice : int list list -> Core.t -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val get_fancy : Owl_types.index list -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val set_fancy : Owl_types.index list -> Core.t -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val diag : ?k:int -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val diagm : ?k:int -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val trace : Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val triu : ?k:int -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val tril : ?k:int -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops/Make/NN/index.html b/owl-base/Owl_algodiff_ops/Make/NN/index.html deleted file mode 100644 index 477a9124e..000000000 --- a/owl-base/Owl_algodiff_ops/Make/NN/index.html +++ /dev/null @@ -1,80 +0,0 @@ - -NN (owl-base.Owl_algodiff_ops.Make.NN)

Module Make.NN

val dropout : ?rate:float -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val conv1d : ?padding:Owl_types.padding -> Core.t -> Core.t -> int array -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val conv2d : ?padding:Owl_types.padding -> Core.t -> Core.t -> int array -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val conv3d : ?padding:Owl_types.padding -> Core.t -> Core.t -> int array -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv1d : ?padding:Owl_types.padding -> Core.t -> Core.t -> int array -> int array -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv2d : ?padding:Owl_types.padding -> Core.t -> Core.t -> int array -> int array -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv3d : ?padding:Owl_types.padding -> Core.t -> Core.t -> int array -> int array -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv1d : ?padding:Owl_types.padding -> Core.t -> Core.t -> int array -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv2d : ?padding:Owl_types.padding -> Core.t -> Core.t -> int array -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv3d : ?padding:Owl_types.padding -> Core.t -> Core.t -> int array -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool1d : Owl_types.padding -> Core.t -> int array -> int array -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool2d : Owl_types.padding -> Core.t -> int array -> int array -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool3d : Owl_types.padding -> Core.t -> int array -> int array -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool1d : Owl_types.padding -> Core.t -> int array -> int array -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool2d : Owl_types.padding -> Core.t -> int array -> int array -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool3d : Owl_types.padding -> Core.t -> int array -> int array -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val upsampling2d : Core.t -> int array -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

val pad : ?v:Core.A.elt -> int list list -> Core.t -> Core.t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops/Make/argument-1-Core/A/Linalg/index.html b/owl-base/Owl_algodiff_ops/Make/argument-1-Core/A/Linalg/index.html deleted file mode 100644 index 243ce6dff..000000000 --- a/owl-base/Owl_algodiff_ops/Make/argument-1-Core/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_algodiff_ops.Make.Core.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops/Make/argument-1-Core/A/Mat/index.html b/owl-base/Owl_algodiff_ops/Make/argument-1-Core/A/Mat/index.html deleted file mode 100644 index 633c56ac3..000000000 --- a/owl-base/Owl_algodiff_ops/Make/argument-1-Core/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_algodiff_ops.Make.Core.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops/Make/argument-1-Core/A/Scalar/index.html b/owl-base/Owl_algodiff_ops/Make/argument-1-Core/A/Scalar/index.html deleted file mode 100644 index aef76547e..000000000 --- a/owl-base/Owl_algodiff_ops/Make/argument-1-Core/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_algodiff_ops.Make.Core.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops/Make/argument-1-Core/A/index.html b/owl-base/Owl_algodiff_ops/Make/argument-1-Core/A/index.html deleted file mode 100644 index 01504a03d..000000000 --- a/owl-base/Owl_algodiff_ops/Make/argument-1-Core/A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl-base.Owl_algodiff_ops.Make.Core.A)

Module Core.A

include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops/Make/argument-1-Core/index.html b/owl-base/Owl_algodiff_ops/Make/argument-1-Core/index.html deleted file mode 100644 index f8af8c431..000000000 --- a/owl-base/Owl_algodiff_ops/Make/argument-1-Core/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Core (owl-base.Owl_algodiff_ops.Make.Core)

Parameter Make.Core

Type definition
include Owl_algodiff_types_sig.Sig with type elt := A.elt and type arr := A.arr
type t =
  | F of A.elt
  | Arr of A.arr
  | DF of t * t * int
  | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
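
The variant type above is the heart of the AD engine: F and Arr wrap plain scalar and ndarray constants, DF carries a forward-mode tangent, and DR carries a reverse-mode node with its adjoint reference, operation record and tag. A minimal sketch of pattern matching on it, written inside a structure that implements this Core signature (describe is a hypothetical helper, not part of the signature):

(* Illustrative only: inspect which kind of AD value we are holding. *)
let describe (x : t) =
  match x with
  | F _ -> "constant scalar"
  | Arr _ -> "constant ndarray"
  | DF (_, _, tag) -> Printf.sprintf "forward-mode dual number (tag %d)" tag
  | DR (_, _, _, _, tag, _) -> Printf.sprintf "reverse-mode node (tag %d)" tag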
Core functions
val tag : unit -> int

TODO

val primal : t -> t

TODO

val primal' : t -> t

TODO

val zero : t -> t

TODO

val reset_zero : t -> t

TODO

val tangent : t -> t

TODO

val adjref : t -> t Stdlib.ref

TODO

val adjval : t -> t

TODO

val shape : t -> int array

TODO

val is_float : t -> bool

TODO

val is_arr : t -> bool

TODO

val row_num : t -> int

number of rows

val col_num : t -> int

number of columns

val numel : t -> int

number of elements

val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t

other functions for which the gradient is not tracked

val clip_by_l2norm : A.elt -> t -> t

other functions for which the gradient is not tracked

val copy_primal' : t -> t

TODO

val tile : t -> int array -> t

TODO

val repeat : t -> int array -> t

TODO

val pack_elt : A.elt -> t

convert from elt type to t type.

val unpack_elt : t -> A.elt

convert from t type to elt type.

val pack_flt : float -> t

convert from float type to t type.

val _f : float -> t

A shortcut function for F A.(float_to_elt x).

val unpack_flt : t -> float

convert from t type to float type.

val pack_arr : A.arr -> t

convert from arr type to t type.

val unpack_arr : t -> A.arr

convert from t type to arr type.
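
The pack_* / unpack_* functions above are the boundary between plain OCaml values and the AD type t. A minimal sketch, assuming a structure satisfying this Core signature (together with its A parameter) is open in the current scope:

(* Hedged sketch: round-tripping plain values through the AD type t. *)
let () =
  let x  = pack_flt 2.5 in                  (* equivalent to _f 2.5, i.e. F (A.float_to_elt 2.5) *)
  let y  = pack_arr (A.ones [| 3; 3 |]) in  (* wrap a constant ndarray *)
  let x' = unpack_flt x in                  (* back to a plain float *)
  let y' = unpack_arr y in                  (* back to a plain A.arr *)
  ignore (x', y')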

val deep_info : t -> string

TODO

val type_info : t -> string

TODO

val error_binop : string -> t -> t -> 'a

TODO

val error_uniop : string -> t -> 'a

TODO

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops/Make/index.html b/owl-base/Owl_algodiff_ops/Make/index.html deleted file mode 100644 index a8b6151a8..000000000 --- a/owl-base/Owl_algodiff_ops/Make/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Make (owl-base.Owl_algodiff_ops.Make)

Module Owl_algodiff_ops.Make

Parameters

Signature

module Builder : Owl_algodiff_ops_builder_sig.Sig with type t := Core.t and type elt := Core.A.elt and type arr := Core.A.arr and type op := Core.op
Supported Maths functions
module Maths : sig ... end
Supported Linalg functions
module Linalg : sig ... end
Supported Neural Network functions
module NN : sig ... end
Supported Mat functions
module Mat : sig ... end
Supported Arr functions
module Arr : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_builder/.dummy b/owl-base/Owl_algodiff_ops_builder/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_algodiff_ops_builder/Make/argument-1-Core/A/Linalg/index.html b/owl-base/Owl_algodiff_ops_builder/Make/argument-1-Core/A/Linalg/index.html deleted file mode 100644 index c472c8161..000000000 --- a/owl-base/Owl_algodiff_ops_builder/Make/argument-1-Core/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_algodiff_ops_builder.Make.Core.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_builder/Make/argument-1-Core/A/Mat/index.html b/owl-base/Owl_algodiff_ops_builder/Make/argument-1-Core/A/Mat/index.html deleted file mode 100644 index d53bc47c8..000000000 --- a/owl-base/Owl_algodiff_ops_builder/Make/argument-1-Core/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_algodiff_ops_builder.Make.Core.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_builder/Make/argument-1-Core/A/Scalar/index.html b/owl-base/Owl_algodiff_ops_builder/Make/argument-1-Core/A/Scalar/index.html deleted file mode 100644 index e4b215085..000000000 --- a/owl-base/Owl_algodiff_ops_builder/Make/argument-1-Core/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_algodiff_ops_builder.Make.Core.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_builder/Make/argument-1-Core/A/index.html b/owl-base/Owl_algodiff_ops_builder/Make/argument-1-Core/A/index.html deleted file mode 100644 index e57604a56..000000000 --- a/owl-base/Owl_algodiff_ops_builder/Make/argument-1-Core/A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl-base.Owl_algodiff_ops_builder.Make.Core.A)

Module Core.A

include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_builder/Make/argument-1-Core/index.html b/owl-base/Owl_algodiff_ops_builder/Make/argument-1-Core/index.html deleted file mode 100644 index fc9f71265..000000000 --- a/owl-base/Owl_algodiff_ops_builder/Make/argument-1-Core/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Core (owl-base.Owl_algodiff_ops_builder.Make.Core)

Parameter Make.Core

Type definition
include Owl_algodiff_types_sig.Sig with type elt := A.elt and type arr := A.arr
type t =
  | F of A.elt
  | Arr of A.arr
  | DF of t * t * int
  | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
Core functions
val tag : unit -> int

TODO

val primal : t -> t

TODO

val primal' : t -> t

TODO

val zero : t -> t

TODO

val reset_zero : t -> t

TODO

val tangent : t -> t

TODO

val adjref : t -> t Stdlib.ref

TODO

val adjval : t -> t

TODO

val shape : t -> int array

TODO

val is_float : t -> bool

TODO

val is_arr : t -> bool

TODO

val row_num : t -> int

number of rows

val col_num : t -> int

number of columns

val numel : t -> int

number of elements

val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t

other functions for which the gradient is not tracked

val clip_by_l2norm : A.elt -> t -> t

other functions for which the gradient is not tracked

val copy_primal' : t -> t

TODO

val tile : t -> int array -> t

TODO

val repeat : t -> int array -> t

TODO

val pack_elt : A.elt -> t

convert from elt type to t type.

val unpack_elt : t -> A.elt

convert from t type to elt type.

val pack_flt : float -> t

convert from float type to t type.

val _f : float -> t

A shortcut function for F A.(float_to_elt x).

val unpack_flt : t -> float

convert from t type to float type.

val pack_arr : A.arr -> t

convert from arr type to t type.

val unpack_arr : t -> A.arr

convert from t type to arr type.

val deep_info : t -> string

TODO

val type_info : t -> string

TODO

val error_binop : string -> t -> t -> 'a

TODO

val error_uniop : string -> t -> 'a

TODO

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_builder/Make/index.html b/owl-base/Owl_algodiff_ops_builder/Make/index.html deleted file mode 100644 index c46d5f474..000000000 --- a/owl-base/Owl_algodiff_ops_builder/Make/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Make (owl-base.Owl_algodiff_ops_builder.Make)

Module Owl_algodiff_ops_builder.Make

Parameters

Signature

module type Siso = sig ... end
val build_siso : (module Siso) -> Core.t -> Core.t

build single input single output operations

module type Sipo = sig ... end
val build_sipo : (module Sipo) -> Core.t -> Core.t * Core.t

build single input pair outputs operations

module type Sito = sig ... end
val build_sito : (module Sito) -> Core.t -> Core.t * Core.t * Core.t

build single input triple outputs operations

module type Siao = sig ... end
val build_siao : (module Siao) -> Core.t -> Core.t array

build single input array output operations

module type Piso = sig ... end
val build_piso : (module Piso) -> Core.t -> Core.t -> Core.t

build pair inputs single output operations

module type Aiso = sig ... end
val build_aiso : (module Aiso) -> Core.t array -> Core.t

build array input single output operations

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_builder/Make/module-type-Aiso/index.html b/owl-base/Owl_algodiff_ops_builder/Make/module-type-Aiso/index.html deleted file mode 100644 index 654b4d7a2..000000000 --- a/owl-base/Owl_algodiff_ops_builder/Make/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl-base.Owl_algodiff_ops_builder.Make.Aiso)

Module type Make.Aiso

val label : string
val ff : Core.t array -> Core.t
val df : int list -> Core.t -> Core.t array -> Core.t array -> Core.t
val dr : int list -> Core.t array -> Core.t -> Core.t Stdlib.ref -> Core.t list
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_builder/Make/module-type-Piso/index.html b/owl-base/Owl_algodiff_ops_builder/Make/module-type-Piso/index.html deleted file mode 100644 index 4de2b4af7..000000000 --- a/owl-base/Owl_algodiff_ops_builder/Make/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl-base.Owl_algodiff_ops_builder.Make.Piso)

Module type Make.Piso

val label : string
val ff_aa : Core.A.elt -> Core.A.elt -> Core.t
val ff_ab : Core.A.elt -> Core.A.arr -> Core.t
val ff_ba : Core.A.arr -> Core.A.elt -> Core.t
val ff_bb : Core.A.arr -> Core.A.arr -> Core.t
val df_da : Core.t -> Core.t -> Core.t -> Core.t -> Core.t
val df_db : Core.t -> Core.t -> Core.t -> Core.t -> Core.t
val df_dab : Core.t -> Core.t -> Core.t -> Core.t -> Core.t -> Core.t
val dr_ab : Core.t -> Core.t -> Core.t -> Core.t Stdlib.ref -> Core.t * Core.t
val dr_a : Core.t -> Core.t -> Core.t -> Core.t Stdlib.ref -> Core.t
val dr_b : Core.t -> Core.t -> Core.t -> Core.t Stdlib.ref -> Core.t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_builder/Make/module-type-Siao/index.html b/owl-base/Owl_algodiff_ops_builder/Make/module-type-Siao/index.html deleted file mode 100644 index 2542af41c..000000000 --- a/owl-base/Owl_algodiff_ops_builder/Make/module-type-Siao/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Siao (owl-base.Owl_algodiff_ops_builder.Make.Siao)

Module type Make.Siao

val label : string
val ff_f : Core.A.elt -> Core.t array
val ff_arr : Core.A.arr -> Core.t array
val df : Core.t array -> Core.t -> Core.t -> Core.t array
val dr : Core.t -> Core.t -> Core.t Stdlib.ref array -> Core.t Stdlib.ref array -> Core.t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_builder/Make/module-type-Sipo/index.html b/owl-base/Owl_algodiff_ops_builder/Make/module-type-Sipo/index.html deleted file mode 100644 index 84c9e9c65..000000000 --- a/owl-base/Owl_algodiff_ops_builder/Make/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl-base.Owl_algodiff_ops_builder.Make.Sipo)

Module type Make.Sipo

val label : string
val ff_f : Core.A.elt -> Core.t * Core.t
val ff_arr : Core.A.arr -> Core.t * Core.t
val df : Core.t -> Core.t -> Core.t -> Core.t
val dr : Core.t -> Core.t -> (Core.t Stdlib.ref * Core.t Stdlib.ref) -> (Core.t Stdlib.ref * Core.t Stdlib.ref) -> Core.t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_builder/Make/module-type-Siso/index.html b/owl-base/Owl_algodiff_ops_builder/Make/module-type-Siso/index.html deleted file mode 100644 index cd617a8a8..000000000 --- a/owl-base/Owl_algodiff_ops_builder/Make/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl-base.Owl_algodiff_ops_builder.Make.Siso)

Module type Make.Siso

val label : string
val ff_f : Core.A.elt -> Core.t
val ff_arr : Core.A.arr -> Core.t
val df : Core.t -> Core.t -> Core.t -> Core.t
val dr : Core.t -> Core.t -> Core.t Stdlib.ref -> Core.t
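
The fields above are all that is needed to register a new unary operation: ff_f / ff_arr give the forward computation on raw scalars and ndarrays, df the forward-mode tangent rule, and dr the reverse-mode adjoint rule. A hedged sketch of a hypothetical cube operator fed to build_siso; the argument order noted in the comments (df cp ap at, dr a cp ca) and the availability of a Maths module providing ( * ) on Core.t are assumptions for illustration, not guarantees of this signature:

(* Hedged sketch: x |-> x^3 as a Siso op. *)
let cube =
  build_siso
    (module struct
      let label = "cube"
      let ff_f x = Core.pack_elt Core.A.Scalar.(mul x (mul x x))
      let ff_arr x = Core.pack_arr Core.A.(mul x (mul x x))
      (* df cp ap at: the tangent of x^3 is 3 * ap^2 * at *)
      let df _cp ap at = Maths.(Core.pack_flt 3. * ap * ap * at)
      (* dr a cp ca: the pullback multiplies the output adjoint !ca by 3 * a^2 *)
      let dr a _cp ca = Maths.(Core.pack_flt 3. * a * a * !ca)
    end : Siso)

Once built this way, cube is an ordinary Core.t -> Core.t function that participates in both forward and reverse differentiation.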
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_builder/Make/module-type-Sito/index.html b/owl-base/Owl_algodiff_ops_builder/Make/module-type-Sito/index.html deleted file mode 100644 index 1f1aaf321..000000000 --- a/owl-base/Owl_algodiff_ops_builder/Make/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl-base.Owl_algodiff_ops_builder.Make.Sito)

Module type Make.Sito

val label : string
val ff_f : Core.A.elt -> Core.t * Core.t * Core.t
val ff_arr : Core.A.arr -> Core.t * Core.t * Core.t
val df : Core.t -> Core.t -> Core.t -> Core.t
val dr : Core.t -> Core.t -> (Core.t Stdlib.ref * Core.t Stdlib.ref * Core.t Stdlib.ref) -> (Core.t Stdlib.ref * Core.t Stdlib.ref * Core.t Stdlib.ref) -> Core.t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_builder_sig/.dummy b/owl-base/Owl_algodiff_ops_builder_sig/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_algodiff_ops_builder_sig/module-type-Sig/index.html b/owl-base/Owl_algodiff_ops_builder_sig/module-type-Sig/index.html deleted file mode 100644 index c86ba1e4f..000000000 --- a/owl-base/Owl_algodiff_ops_builder_sig/module-type-Sig/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Sig (owl-base.Owl_algodiff_ops_builder_sig.Sig)

Module type Owl_algodiff_ops_builder_sig.Sig

type elt
type arr
type t
type op
module type Siso = sig ... end
val build_siso : (module Siso) -> t -> t

build single input single output operations

module type Sipo = sig ... end
val build_sipo : (module Sipo) -> t -> t * t

build single input pair outputs operations

module type Sito = sig ... end
val build_sito : (module Sito) -> t -> t * t * t

build single input triple outputs operations

module type Siao = sig ... end
val build_siao : (module Siao) -> t -> t array

build single input array output operations

module type Piso = sig ... end
val build_piso : (module Piso) -> t -> t -> t

build pair inputs single output operations

module type Aiso = sig ... end
val build_aiso : (module Aiso) -> t array -> t

build array input single output operations

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_builder_sig/module-type-Sig/module-type-Aiso/index.html b/owl-base/Owl_algodiff_ops_builder_sig/module-type-Sig/module-type-Aiso/index.html deleted file mode 100644 index 9a1853374..000000000 --- a/owl-base/Owl_algodiff_ops_builder_sig/module-type-Sig/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl-base.Owl_algodiff_ops_builder_sig.Sig.Aiso)

Module type Sig.Aiso

val label : string
val ff : t array -> t
val df : int list -> t -> t array -> t array -> t
val dr : int list -> t array -> t -> t Stdlib.ref -> t list
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_builder_sig/module-type-Sig/module-type-Piso/index.html b/owl-base/Owl_algodiff_ops_builder_sig/module-type-Sig/module-type-Piso/index.html deleted file mode 100644 index 696b9a46c..000000000 --- a/owl-base/Owl_algodiff_ops_builder_sig/module-type-Sig/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl-base.Owl_algodiff_ops_builder_sig.Sig.Piso)

Module type Sig.Piso

val label : string
val ff_aa : elt -> elt -> t
val ff_ab : elt -> arr -> t
val ff_ba : arr -> elt -> t
val ff_bb : arr -> arr -> t
val df_da : t -> t -> t -> t -> t
val df_db : t -> t -> t -> t -> t
val df_dab : t -> t -> t -> t -> t -> t
val dr_ab : t -> t -> t -> t Stdlib.ref -> t * t
val dr_a : t -> t -> t -> t Stdlib.ref -> t
val dr_b : t -> t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_builder_sig/module-type-Sig/module-type-Siao/index.html b/owl-base/Owl_algodiff_ops_builder_sig/module-type-Sig/module-type-Siao/index.html deleted file mode 100644 index 71d744da9..000000000 --- a/owl-base/Owl_algodiff_ops_builder_sig/module-type-Sig/module-type-Siao/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siao (owl-base.Owl_algodiff_ops_builder_sig.Sig.Siao)

Module type Sig.Siao

val label : string
val ff_f : elt -> t array
val ff_arr : arr -> t array
val df : t array -> t -> t -> t array
val dr : t -> t -> t Stdlib.ref array -> t Stdlib.ref array -> t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_builder_sig/module-type-Sig/module-type-Sipo/index.html b/owl-base/Owl_algodiff_ops_builder_sig/module-type-Sig/module-type-Sipo/index.html deleted file mode 100644 index 8b7ae738b..000000000 --- a/owl-base/Owl_algodiff_ops_builder_sig/module-type-Sig/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl-base.Owl_algodiff_ops_builder_sig.Sig.Sipo)

Module type Sig.Sipo

val label : string
val ff_f : elt -> t * t
val ff_arr : arr -> t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_builder_sig/module-type-Sig/module-type-Siso/index.html b/owl-base/Owl_algodiff_ops_builder_sig/module-type-Sig/module-type-Siso/index.html deleted file mode 100644 index 1a948c70b..000000000 --- a/owl-base/Owl_algodiff_ops_builder_sig/module-type-Sig/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl-base.Owl_algodiff_ops_builder_sig.Sig.Siso)

Module type Sig.Siso

val label : string
val ff_f : elt -> t
val ff_arr : arr -> t
val df : t -> t -> t -> t
val dr : t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_builder_sig/module-type-Sig/module-type-Sito/index.html b/owl-base/Owl_algodiff_ops_builder_sig/module-type-Sig/module-type-Sito/index.html deleted file mode 100644 index 7fd6244a5..000000000 --- a/owl-base/Owl_algodiff_ops_builder_sig/module-type-Sig/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl-base.Owl_algodiff_ops_builder_sig.Sig.Sito)

Module type Sig.Sito

val label : string
val ff_f : elt -> t * t * t
val ff_arr : arr -> t * t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_sig/.dummy b/owl-base/Owl_algodiff_ops_sig/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Arr/index.html b/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Arr/index.html deleted file mode 100644 index b6de398e7..000000000 --- a/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Arr/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Arr (owl-base.Owl_algodiff_ops_sig.Sig.Arr)

Module Sig.Arr

val empty : int array -> t
val zeros : int array -> t
val ones : int array -> t
val uniform : ?a:elt -> ?b:elt -> int array -> t
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> t
val shape : t -> int array
val numel : t -> int
val reset : t -> unit
val reshape : t -> int array -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
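
The Arr module above creates and combines AD values with ndarray shapes. A minimal sketch, where AD is an assumed alias for any module satisfying Owl_algodiff_ops_sig.Sig:

(* Hedged sketch: create, multiply and inspect AD ndarrays. *)
let () =
  let x = AD.Arr.uniform [| 4; 3 |] in
  let w = AD.Arr.ones [| 3; 2 |] in
  let y = AD.Arr.dot x w in                (* 4x3 times 3x2 gives 4x2 *)
  assert (AD.Arr.shape y = [| 4; 2 |])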
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Builder/index.html b/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Builder/index.html deleted file mode 100644 index 4eab5a465..000000000 --- a/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Builder/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Builder (owl-base.Owl_algodiff_ops_sig.Sig.Builder)

Module Sig.Builder

Ops Builder
module type Siso = sig ... end
val build_siso : (module Siso) -> t -> t

build single input single output operations

module type Sipo = sig ... end
val build_sipo : (module Sipo) -> t -> t * t

build single input pair outputs operations

module type Sito = sig ... end
val build_sito : (module Sito) -> t -> t * t * t

build single input triple outputs operations

module type Siao = sig ... end
val build_siao : (module Siao) -> t -> t array

build single input array output operations

module type Piso = sig ... end
val build_piso : (module Piso) -> t -> t -> t

build pair inputs single output operations

module type Aiso = sig ... end
val build_aiso : (module Aiso) -> t array -> t

build array input single output operations

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Builder/module-type-Aiso/index.html b/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Builder/module-type-Aiso/index.html deleted file mode 100644 index e08b12803..000000000 --- a/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Builder/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl-base.Owl_algodiff_ops_sig.Sig.Builder.Aiso)

Module type Builder.Aiso

val label : string
val ff : t array -> t
val df : int list -> t -> t array -> t array -> t
val dr : int list -> t array -> t -> t Stdlib.ref -> t list
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Builder/module-type-Piso/index.html b/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Builder/module-type-Piso/index.html deleted file mode 100644 index 956c2e545..000000000 --- a/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Builder/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl-base.Owl_algodiff_ops_sig.Sig.Builder.Piso)

Module type Builder.Piso

val label : string
val ff_aa : elt -> elt -> t
val ff_ab : elt -> arr -> t
val ff_ba : arr -> elt -> t
val ff_bb : arr -> arr -> t
val df_da : t -> t -> t -> t -> t
val df_db : t -> t -> t -> t -> t
val df_dab : t -> t -> t -> t -> t -> t
val dr_ab : t -> t -> t -> t Stdlib.ref -> t * t
val dr_a : t -> t -> t -> t Stdlib.ref -> t
val dr_b : t -> t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Builder/module-type-Siao/index.html b/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Builder/module-type-Siao/index.html deleted file mode 100644 index 3be40e65a..000000000 --- a/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Builder/module-type-Siao/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siao (owl-base.Owl_algodiff_ops_sig.Sig.Builder.Siao)

Module type Builder.Siao

val label : string
val ff_f : elt -> t array
val ff_arr : arr -> t array
val df : t array -> t -> t -> t array
val dr : t -> t -> t Stdlib.ref array -> t Stdlib.ref array -> t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Builder/module-type-Sipo/index.html b/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Builder/module-type-Sipo/index.html deleted file mode 100644 index a052c08cb..000000000 --- a/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Builder/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl-base.Owl_algodiff_ops_sig.Sig.Builder.Sipo)

Module type Builder.Sipo

val label : string
val ff_f : elt -> t * t
val ff_arr : arr -> t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Builder/module-type-Siso/index.html b/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Builder/module-type-Siso/index.html deleted file mode 100644 index 13478544b..000000000 --- a/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Builder/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl-base.Owl_algodiff_ops_sig.Sig.Builder.Siso)

Module type Builder.Siso

val label : string
val ff_f : elt -> t
val ff_arr : arr -> t
val df : t -> t -> t -> t
val dr : t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Builder/module-type-Sito/index.html b/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Builder/module-type-Sito/index.html deleted file mode 100644 index cfd337c03..000000000 --- a/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Builder/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl-base.Owl_algodiff_ops_sig.Sig.Builder.Sito)

Module type Builder.Sito

val label : string
val ff_f : elt -> t * t * t
val ff_arr : arr -> t * t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Linalg/index.html b/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Linalg/index.html deleted file mode 100644 index 506f15972..000000000 --- a/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_algodiff_ops_sig.Sig.Linalg)

Module Sig.Linalg

val inv : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val logdet : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val chol : ?upper:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val qr : t -> t * t

Refer to :doc:`owl_dense_ndarray_generic`

val lq : t -> t * t

Refer to :doc:`owl_dense_ndarray_generic`

val svd : ?thin:bool -> t -> t * t * t

Refer to :doc:`owl_dense_ndarray_generic`

val sylvester : t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val lyapunov : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (/@) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val care : ?diag_r:bool -> t -> t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dare : ?diag_r:bool -> t -> t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`
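
The Linalg functions above operate on the AD type t, so their results stay differentiable. A hedged sketch of solving a linear system and checking the residual with the sibling Maths module; AD is an assumed alias for a module satisfying this signature:

(* Hedged sketch: solve A x = b and form the residual A x - b. *)
let () =
  let a = AD.Arr.gaussian [| 3; 3 |] in
  let b = AD.Arr.gaussian [| 3; 1 |] in
  let x = AD.Linalg.linsolve a b in
  let r = AD.Maths.(a *@ x - b) in   (* should be close to zero *)
  ignore r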

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Mat/index.html b/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Mat/index.html deleted file mode 100644 index 41e82a2d2..000000000 --- a/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_algodiff_ops_sig.Sig.Mat)

Module Sig.Mat

val empty : int -> int -> t
val zeros : int -> int -> t
val eye : int -> t
val ones : int -> int -> t
val uniform : ?a:elt -> ?b:elt -> int -> int -> t
val gaussian : ?mu:elt -> ?sigma:elt -> int -> int -> t
val shape : t -> int * int
val numel : t -> int
val row_num : t -> int
val col_num : t -> int
val reset : t -> unit
val reshape : int -> int -> t -> t
val get : t -> int -> int -> t
val set : t -> int -> int -> t -> t
val row : t -> int -> t
val mean : t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
val map_by_row : (t -> t) -> t -> t
val of_arrays : elt array array -> t
val init_2d : int -> int -> (int -> int -> t) -> t
val print : t -> unit
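
The Mat module offers a 2-D view over the same AD type t. A minimal sketch, with AD again standing in for any module satisfying this signature:

(* Hedged sketch: basic matrix construction and multiplication. *)
let () =
  let m = AD.Mat.eye 3 in
  let v = AD.Mat.uniform 3 1 in
  let p = AD.Mat.dot m v in   (* identity times v is v *)
  Printf.printf "rows = %d, cols = %d\n" (AD.Mat.row_num p) (AD.Mat.col_num p)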
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Maths/index.html b/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Maths/index.html deleted file mode 100644 index 72020a75c..000000000 --- a/owl-base/Owl_algodiff_ops_sig/module-type-Sig/Maths/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Maths (owl-base.Owl_algodiff_ops_sig.Sig.Maths)

Module Sig.Maths

val (+) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (-) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (*) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (/) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (*@) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (**) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val add : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sub : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val mul : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val div : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val kron : t -> t -> t

Refer to :doc:`owl_dense_matrix_generic`

val dot : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val pow : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atan2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val min2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cross_entropy : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val inv : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val neg : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val abs : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val signum : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val floor : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val ceil : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val round : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sqr : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sqrt : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log2 : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log10 : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val exp : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sin : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cos : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tan : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sinh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cosh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tanh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val asin : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val acos : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atan : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val asinh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val acosh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atanh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp : ?axis:int -> ?keep_dims:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum : ?axis:int -> ?keep_dims:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum_reduce : ?axis:int array -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val mean : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose : ?axis:int array -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val swap : int -> int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l1norm' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm_sqr' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sigmoid : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val relu : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dawsn : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softplus : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softsign : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softmax : ?axis:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val reshape : t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val flatten : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_item : t -> int -> int -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_row : t -> int -> t

Refer to :doc:`owl_dense_ndarray_generic`

val concat : axis:int -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val split : axis:int -> int array -> t -> t array

Refer to :doc:`owl_dense_ndarray_generic`

val of_arrays : t array array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val to_arrays : t -> t array array

Refer to :doc:`owl_dense_ndarray_generic`

val concatenate : axis:int -> t array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val stack : axis:int -> t array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_slice : int list list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val set_slice : int list list -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_fancy : Owl_types.index list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val set_fancy : Owl_types.index list -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val diag : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val diagm : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val trace : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val triu : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tril : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`
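
Because the Maths operators act on the AD type t, ordinary-looking OCaml functions written with them can later be differentiated by the algodiff drivers (grad, jacobian and friends live outside this signature). A minimal sketch, with AD an assumed alias for a module satisfying this signature:

(* Hedged sketch: a scalar-valued function of an ndarray-valued t. *)
let f x = AD.Maths.(sigmoid (sum' (x * x)))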

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_sig/module-type-Sig/NN/index.html b/owl-base/Owl_algodiff_ops_sig/module-type-Sig/NN/index.html deleted file mode 100644 index bca834a79..000000000 --- a/owl-base/Owl_algodiff_ops_sig/module-type-Sig/NN/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -NN (owl-base.Owl_algodiff_ops_sig.Sig.NN)

Module Sig.NN

val dropout : ?rate:float -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool1d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool2d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool3d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool1d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool2d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool3d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val upsampling2d : t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val pad : ?v:elt -> int list list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`
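
The NN functions compose in the usual convolution-then-pooling pattern. A hedged sketch of one forward step; the NHWC layout assumed in the comments (input [|batch; h; w; in_channels|], kernel [|kh; kw; in_channels; out_channels|]) and the alias AD are assumptions for illustration:

(* Hedged sketch: one conv + max-pool step with stride 1 and 2x2 pooling. *)
let forward x w =
  let y = AD.NN.conv2d ~padding:Owl_types.SAME x w [| 1; 1 |] in
  AD.NN.max_pool2d Owl_types.SAME y [| 2; 2 |] [| 2; 2 |]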

\ No newline at end of file diff --git a/owl-base/Owl_algodiff_ops_sig/module-type-Sig/index.html b/owl-base/Owl_algodiff_ops_sig/module-type-Sig/index.html deleted file mode 100644 index bcb9a3316..000000000 --- a/owl-base/Owl_algodiff_ops_sig/module-type-Sig/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sig (owl-base.Owl_algodiff_ops_sig.Sig)

Module type Owl_algodiff_ops_sig.Sig

type t
type elt
type arr
type op
module Builder : Owl_algodiff_ops_builder_sig.Sig with type t := t and type elt := elt and type arr := arr and type op := op
Supported Maths functions
module Maths : sig ... end
Supported Linalg functions
module Linalg : sig ... end
Supported Neural Network functions
module NN : sig ... end
Supported Mat functions
module Mat : sig ... end
Supported Arr functions
module Arr : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_reverse/.dummy b/owl-base/Owl_algodiff_reverse/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_algodiff_reverse/Make/argument-1-C/A/Linalg/index.html b/owl-base/Owl_algodiff_reverse/Make/argument-1-C/A/Linalg/index.html deleted file mode 100644 index 9d8e800fe..000000000 --- a/owl-base/Owl_algodiff_reverse/Make/argument-1-C/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_algodiff_reverse.Make.C.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_reverse/Make/argument-1-C/A/Mat/index.html b/owl-base/Owl_algodiff_reverse/Make/argument-1-C/A/Mat/index.html deleted file mode 100644 index f973aeff4..000000000 --- a/owl-base/Owl_algodiff_reverse/Make/argument-1-C/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_algodiff_reverse.Make.C.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_reverse/Make/argument-1-C/A/Scalar/index.html b/owl-base/Owl_algodiff_reverse/Make/argument-1-C/A/Scalar/index.html deleted file mode 100644 index f7cc9e74a..000000000 --- a/owl-base/Owl_algodiff_reverse/Make/argument-1-C/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_algodiff_reverse.Make.C.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_reverse/Make/argument-1-C/A/index.html b/owl-base/Owl_algodiff_reverse/Make/argument-1-C/A/index.html deleted file mode 100644 index 2fa03c47b..000000000 --- a/owl-base/Owl_algodiff_reverse/Make/argument-1-C/A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl-base.Owl_algodiff_reverse.Make.C.A)

Module C.A

include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_reverse/Make/argument-1-C/index.html b/owl-base/Owl_algodiff_reverse/Make/argument-1-C/index.html deleted file mode 100644 index c3bba61fc..000000000 --- a/owl-base/Owl_algodiff_reverse/Make/argument-1-C/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -C (owl-base.Owl_algodiff_reverse.Make.C)

Parameter Make.C

include Owl_algodiff_core_sig.Sig
Type definition
include Owl_algodiff_types_sig.Sig with type elt := A.elt and type arr := A.arr
type t =
  | F of A.elt
  | Arr of A.arr
  | DF of t * t * int
  | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
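
As a rough orientation, here is a minimal, hypothetical sketch of how a value of this variant type can be classified; it assumes only a module M satisfying Owl_algodiff_types_sig.Sig and is not itself part of this interface.

(* Hypothetical sketch: classify an AD value by its constructor.
   Works for any module M matching Owl_algodiff_types_sig.Sig. *)
module Describe (M : Owl_algodiff_types_sig.Sig) = struct
  let describe (x : M.t) =
    match x with
    | M.F _ -> "constant scalar"
    | M.Arr _ -> "constant ndarray"
    | M.DF (_, _, tag) -> Printf.sprintf "forward-mode dual number (tag %d)" tag
    | M.DR (_, _, _, _, tag, _) -> Printf.sprintf "reverse-mode node (tag %d)" tag
end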
Core functions
val tag : unit -> int

TODO

val primal : t -> t

TODO

val primal' : t -> t

TODO

val zero : t -> t

TODO

val reset_zero : t -> t

TODO

val tangent : t -> t

TODO

val adjref : t -> t Stdlib.ref

TODO

val adjval : t -> t

TODO

val shape : t -> int array

TODO

val is_float : t -> bool

TODO

val is_arr : t -> bool

TODO

val row_num : t -> int

number of rows

val col_num : t -> int

number of columns

val numel : t -> int

number of elements

val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t

other functions, without tracking gradient

val clip_by_l2norm : A.elt -> t -> t

other functions, without tracking gradient

val copy_primal' : t -> t

TODO

val tile : t -> int array -> t

TODO

val repeat : t -> int array -> t

TODO

val pack_elt : A.elt -> t

convert from elt type to t type.

val unpack_elt : t -> A.elt

convert from t type to elt type.

val pack_flt : float -> t

convert from float type to t type.

val _f : float -> t

A shortcut function for F A.(float_to_elt x).

val unpack_flt : t -> float

convert from t type to float type.

val pack_arr : A.arr -> t

convert from arr type to t type.

val unpack_arr : t -> A.arr

convert from t type to arr type.
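
As a hedged illustration, the pack/unpack pairs above are typically used as follows; this assumes a module AD providing this signature, with AD.A as its underlying ndarray module, and is only a sketch.

(* Hypothetical usage of the conversion helpers above. *)
let x  = AD.pack_flt 2.5                      (* float -> t *)
let f  = AD.unpack_flt x                      (* t -> float, yields 2.5 *)
let a  = AD.pack_arr (AD.A.ones [| 3; 3 |])   (* A.arr -> t *)
let a' = AD.unpack_arr a                      (* t -> A.arr *)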

val deep_info : t -> string

TODO

val type_info : t -> string

TODO

val error_binop : string -> t -> t -> 'a

TODO

val error_uniop : string -> t -> 'a

TODO

val reverse_add : t -> t -> t
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_reverse/Make/index.html b/owl-base/Owl_algodiff_reverse/Make/index.html deleted file mode 100644 index 4250a83e5..000000000 --- a/owl-base/Owl_algodiff_reverse/Make/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Make (owl-base.Owl_algodiff_reverse.Make)

Module Owl_algodiff_reverse.Make

Parameters

module C : sig ... end

Signature

val reverse_push : C.t -> C.t -> unit
val reverse_prop : C.t -> C.t -> unit
val reverse_reset : C.t -> unit
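
For context, one plausible way to drive a reverse pass with these functions; this is a sketch only, assuming R = Owl_algodiff_reverse.Make (C) and that y : C.t is the output node of a traced computation.

(* Hypothetical sketch of a reverse pass over the graph rooted at y. *)
let backprop (y : C.t) =
  (* seed the output adjoint with 1.0 and push adjoints back through the graph *)
  R.reverse_prop (C.pack_flt 1.0) y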
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_types/.dummy b/owl-base/Owl_algodiff_types/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_algodiff_types/Make/argument-1-A/Linalg/index.html b/owl-base/Owl_algodiff_types/Make/argument-1-A/Linalg/index.html deleted file mode 100644 index 72be466c8..000000000 --- a/owl-base/Owl_algodiff_types/Make/argument-1-A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_algodiff_types.Make.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_types/Make/argument-1-A/Mat/index.html b/owl-base/Owl_algodiff_types/Make/argument-1-A/Mat/index.html deleted file mode 100644 index 660ebbbb8..000000000 --- a/owl-base/Owl_algodiff_types/Make/argument-1-A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_algodiff_types.Make.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_types/Make/argument-1-A/Scalar/index.html b/owl-base/Owl_algodiff_types/Make/argument-1-A/Scalar/index.html deleted file mode 100644 index 54ce3f4e8..000000000 --- a/owl-base/Owl_algodiff_types/Make/argument-1-A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_algodiff_types.Make.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_types/Make/argument-1-A/index.html b/owl-base/Owl_algodiff_types/Make/argument-1-A/index.html deleted file mode 100644 index 86631d66c..000000000 --- a/owl-base/Owl_algodiff_types/Make/argument-1-A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl-base.Owl_algodiff_types.Make.A)

Parameter Make.A

include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_types/Make/index.html b/owl-base/Owl_algodiff_types/Make/index.html deleted file mode 100644 index d3b082d1d..000000000 --- a/owl-base/Owl_algodiff_types/Make/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Make (owl-base.Owl_algodiff_types.Make)

Module Owl_algodiff_types.Make

Parameters

module A : sig ... end

Signature

type t =
  | F of A.elt
  | Arr of A.arr
  | DF of t * t * int
  | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
\ No newline at end of file diff --git a/owl-base/Owl_algodiff_types_sig/.dummy b/owl-base/Owl_algodiff_types_sig/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_algodiff_types_sig/module-type-Sig/index.html b/owl-base/Owl_algodiff_types_sig/module-type-Sig/index.html deleted file mode 100644 index a7ad4bc28..000000000 --- a/owl-base/Owl_algodiff_types_sig/module-type-Sig/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Sig (owl-base.Owl_algodiff_types_sig.Sig)

Module type Owl_algodiff_types_sig.Sig

type elt
type arr
type t =
  | F of elt
  | Arr of arr
  | DF of t * t * int
  | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
\ No newline at end of file diff --git a/owl-base/Owl_base/.dummy b/owl-base/Owl_base/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_algodiff_primal_ops/.dummy b/owl-base/Owl_base_algodiff_primal_ops/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_algodiff_primal_ops/D/Linalg/index.html b/owl-base/Owl_base_algodiff_primal_ops/D/Linalg/index.html deleted file mode 100644 index f33d33d96..000000000 --- a/owl-base/Owl_base_algodiff_primal_ops/D/Linalg/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -Linalg (owl-base.Owl_base_algodiff_primal_ops.D.Linalg)

Module D.Linalg

include module type of struct include Owl_base_linalg_d end
type elt = float
type complex_mat = Owl_base_dense_matrix_z.mat
type int32_mat = (int32, Stdlib.Bigarray.int32_elt) Owl_base_dense_matrix_generic.t
include Owl_base_linalg_intf.Common with type elt := elt and type mat := mat and type complex_mat := complex_mat and type int32_mat := int32_mat
Basic functions
val inv : mat -> mat
val det : mat -> elt
val logdet : mat -> elt
val is_triu : mat -> bool
val is_tril : mat -> bool
val is_symmetric : mat -> bool
val is_diag : mat -> bool
Factorisation
val svd : ?thin:bool -> mat -> mat * mat * mat
val chol : ?upper:bool -> mat -> mat
Linear system of equations
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> mat -> mat -> mat
val sylvester : mat -> mat -> mat -> mat
val lyapunov : mat -> mat -> mat
val discrete_lyapunov : ?solver:[ `default | `direct | `bilinear ] -> mat -> mat -> mat
include Owl_base_linalg_intf.Real with type elt := elt and type mat := mat
val care : ?diag_r:bool -> mat -> mat -> mat -> mat -> mat
val dare : ?diag_r:bool -> mat -> mat -> mat -> mat -> mat
val qr : mat -> mat * mat
val lq : mat -> mat * mat
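
A brief, hypothetical usage sketch of this Linalg module, assuming the module path below and a well-conditioned system; it is an illustration only.

(* Hypothetical usage of D.Linalg: solve a small linear system a * x = b. *)
module D = Owl_base_algodiff_primal_ops.D

let () =
  let a = D.of_arrays [| [| 4.; 1. |]; [| 1.; 3. |] |] in
  let b = D.of_arrays [| [| 1. |]; [| 2. |] |] in
  let x = D.Linalg.linsolve a b in
  D.print x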
\ No newline at end of file diff --git a/owl-base/Owl_base_algodiff_primal_ops/D/Mat/index.html b/owl-base/Owl_base_algodiff_primal_ops/D/Mat/index.html deleted file mode 100644 index 44f15500c..000000000 --- a/owl-base/Owl_base_algodiff_primal_ops/D/Mat/index.html +++ /dev/null @@ -1,13 +0,0 @@ - -Mat (owl-base.Owl_base_algodiff_primal_ops.D.Mat)

Module D.Mat

val eye : int -> (float, Stdlib.Bigarray.float64_elt) Owl_base_dense_matrix_d.M.t
val tril : ?k:int -> ('a, 'b) Owl_base_dense_matrix_d.t -> ('a, 'b) Owl_base_dense_matrix_d.t
val triu : ?k:int -> ('a, 'b) Owl_base_dense_matrix_d.t -> ('a, 'b) Owl_base_dense_matrix_d.t
val diagm : ?k:int -> ('a, 'b) Owl_base_dense_matrix_d.t -> ('a, 'b) Owl_base_dense_matrix_d.t
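
Likewise, a small hypothetical example of the D.Mat helpers above, assuming the module path below; it is a sketch rather than part of this interface.

(* Hypothetical usage of D.Mat. *)
module D = Owl_base_algodiff_primal_ops.D

let id3 = D.Mat.eye 3          (* 3 x 3 identity matrix *)
let ut  = D.Mat.triu id3       (* keep the upper-triangular part *)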
\ No newline at end of file diff --git a/owl-base/Owl_base_algodiff_primal_ops/D/index.html b/owl-base/Owl_base_algodiff_primal_ops/D/index.html deleted file mode 100644 index 2902b8840..000000000 --- a/owl-base/Owl_base_algodiff_primal_ops/D/index.html +++ /dev/null @@ -1,164 +0,0 @@ - -D (owl-base.Owl_base_algodiff_primal_ops.D)

Module Owl_base_algodiff_primal_ops.D

include module type of struct include Owl_base_dense_ndarray_d end
type elt = float
type arr = (float, Stdlib.Bigarray.float64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
include Owl_base_dense_ndarray_intf.Common with type arr := arr and type elt := elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:float -> int array -> arr
val shape : arr -> int array
val numel : arr -> int
val strides : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val slice_size : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val flatten : arr -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val squeeze : ?axis:int array -> arr -> arr
val expand : ?hi:bool -> arr -> int -> arr
val split : ?axis:int -> int array -> arr -> arr array
val draw : ?axis:int -> arr -> int -> arr * int array
val pad : ?v:elt -> int list list -> arr -> arr
val one_hot : int -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
Iterate array elements
val iteri : (int -> elt -> unit) -> arr -> unit
val iter : (elt -> unit) -> arr -> unit
val mapi : (int -> elt -> elt) -> arr -> arr
val map : (elt -> elt) -> arr -> arr
val filteri : (int -> elt -> bool) -> arr -> int array
val filter : (elt -> bool) -> arr -> int array
val foldi : ?axis:int -> (int -> elt -> elt -> elt) -> elt -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scani : ?axis:int -> (int -> elt -> elt -> elt) -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
Examination & Comparison
val exists : (elt -> bool) -> arr -> bool
val not_exists : (elt -> bool) -> arr -> bool
val for_all : (elt -> bool) -> arr -> bool
val is_zero : arr -> bool
val is_positive : arr -> bool
val is_negative : arr -> bool
val is_nonpositive : arr -> bool
val is_nonnegative : arr -> bool
val is_normal : arr -> bool
val not_nan : arr -> bool
val not_inf : arr -> bool
val equal : arr -> arr -> bool
val not_equal : arr -> arr -> bool
val greater : arr -> arr -> bool
val less : arr -> arr -> bool
val greater_equal : arr -> arr -> bool
val less_equal : arr -> arr -> bool
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val equal_scalar : arr -> elt -> bool
val not_equal_scalar : arr -> elt -> bool
val less_scalar : arr -> elt -> bool
val greater_scalar : arr -> elt -> bool
val less_equal_scalar : arr -> elt -> bool
val greater_equal_scalar : arr -> elt -> bool
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val diag : ?k:int -> arr -> arr
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
include Owl_base_dense_ndarray_intf.Real with type arr := arr and type elt := elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_slices : ?axis:int -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val approx_equal : ?eps:float -> arr -> arr -> bool
val approx_equal_scalar : ?eps:float -> arr -> float -> bool
val approx_elt_equal : ?eps:float -> arr -> arr -> arr
val approx_elt_equal_scalar : ?eps:float -> arr -> float -> arr
val dot : arr -> arr -> arr
val trace : arr -> elt
Helper functions
val float_to_elt : float -> elt
val elt_to_float : elt -> float
include Owl_base_dense_ndarray_intf.NN with type arr := arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
module Scalar = Owl_base_maths
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_base_algodiff_primal_ops/S/Linalg/index.html b/owl-base/Owl_base_algodiff_primal_ops/S/Linalg/index.html deleted file mode 100644 index 12230b400..000000000 --- a/owl-base/Owl_base_algodiff_primal_ops/S/Linalg/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -Linalg (owl-base.Owl_base_algodiff_primal_ops.S.Linalg)

Module S.Linalg

include module type of struct include Owl_base_linalg_s end
type elt = float
type complex_mat = Owl_base_dense_matrix_c.mat
type int32_mat = (int32, Stdlib.Bigarray.int32_elt) Owl_base_dense_matrix_generic.t
include Owl_base_linalg_intf.Common with type elt := elt and type mat := mat and type complex_mat := complex_mat and type int32_mat := int32_mat
Basic functions
val inv : mat -> mat
val det : mat -> elt
val logdet : mat -> elt
val is_triu : mat -> bool
val is_tril : mat -> bool
val is_symmetric : mat -> bool
val is_diag : mat -> bool
Factorisation
val svd : ?thin:bool -> mat -> mat * mat * mat
val chol : ?upper:bool -> mat -> mat
Linear system of equations
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> mat -> mat -> mat
val sylvester : mat -> mat -> mat -> mat
val lyapunov : mat -> mat -> mat
val discrete_lyapunov : ?solver:[ `default | `direct | `bilinear ] -> mat -> mat -> mat
include Owl_base_linalg_intf.Real with type elt := elt and type mat := mat
val care : ?diag_r:bool -> mat -> mat -> mat -> mat -> mat
val dare : ?diag_r:bool -> mat -> mat -> mat -> mat -> mat
val qr : mat -> mat * mat
val lq : mat -> mat * mat
\ No newline at end of file diff --git a/owl-base/Owl_base_algodiff_primal_ops/S/Mat/index.html b/owl-base/Owl_base_algodiff_primal_ops/S/Mat/index.html deleted file mode 100644 index 747c4f9c5..000000000 --- a/owl-base/Owl_base_algodiff_primal_ops/S/Mat/index.html +++ /dev/null @@ -1,13 +0,0 @@ - -Mat (owl-base.Owl_base_algodiff_primal_ops.S.Mat)

Module S.Mat

val eye : int -> (float, Stdlib.Bigarray.float32_elt) Owl_base_dense_matrix_s.M.t
val tril : ?k:int -> ('a, 'b) Owl_base_dense_matrix_s.t -> ('a, 'b) Owl_base_dense_matrix_s.t
val triu : ?k:int -> ('a, 'b) Owl_base_dense_matrix_s.t -> ('a, 'b) Owl_base_dense_matrix_s.t
val diagm : ?k:int -> ('a, 'b) Owl_base_dense_matrix_s.t -> ('a, 'b) Owl_base_dense_matrix_s.t
\ No newline at end of file diff --git a/owl-base/Owl_base_algodiff_primal_ops/S/index.html b/owl-base/Owl_base_algodiff_primal_ops/S/index.html deleted file mode 100644 index 0c8654204..000000000 --- a/owl-base/Owl_base_algodiff_primal_ops/S/index.html +++ /dev/null @@ -1,225 +0,0 @@ - -S (owl-base.Owl_base_algodiff_primal_ops.S)

Module Owl_base_algodiff_primal_ops.S

include module type of struct include Owl_base_dense_ndarray.S end
include module type of struct include Owl_base_dense_ndarray_s end
type elt = float
type arr = (float, Stdlib.Bigarray.float32_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
include Owl_base_dense_ndarray_intf.Common with type arr := arr and type elt := elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:float -> int array -> arr
val shape : arr -> int array
val numel : arr -> int
val strides : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val slice_size : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val flatten : arr -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val squeeze : ?axis:int array -> arr -> arr
val expand : ?hi:bool -> arr -> int -> arr
val split : ?axis:int -> int array -> arr -> arr array
val draw : ?axis:int -> arr -> int -> arr * int array
val pad : ?v:elt -> int list list -> arr -> arr
val one_hot : int -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
Iterate array elements
val iteri : (int -> elt -> unit) -> arr -> unit
val iter : (elt -> unit) -> arr -> unit
val mapi : (int -> elt -> elt) -> arr -> arr
val map : (elt -> elt) -> arr -> arr
val filteri : (int -> elt -> bool) -> arr -> int array
val filter : (elt -> bool) -> arr -> int array
val foldi : ?axis:int -> (int -> elt -> elt -> elt) -> elt -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scani : ?axis:int -> (int -> elt -> elt -> elt) -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
Examination & Comparison
val exists : (elt -> bool) -> arr -> bool
val not_exists : (elt -> bool) -> arr -> bool
val for_all : (elt -> bool) -> arr -> bool
val is_zero : arr -> bool
val is_positive : arr -> bool
val is_negative : arr -> bool
val is_nonpositive : arr -> bool
val is_nonnegative : arr -> bool
val is_normal : arr -> bool
val not_nan : arr -> bool
val not_inf : arr -> bool
val equal : arr -> arr -> bool
val not_equal : arr -> arr -> bool
val greater : arr -> arr -> bool
val less : arr -> arr -> bool
val greater_equal : arr -> arr -> bool
val less_equal : arr -> arr -> bool
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val equal_scalar : arr -> elt -> bool
val not_equal_scalar : arr -> elt -> bool
val less_scalar : arr -> elt -> bool
val greater_scalar : arr -> elt -> bool
val less_equal_scalar : arr -> elt -> bool
val greater_equal_scalar : arr -> elt -> bool
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val diag : ?k:int -> arr -> arr
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
include Owl_base_dense_ndarray_intf.Real with type arr := arr and type elt := elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_slices : ?axis:int -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val approx_equal : ?eps:float -> arr -> arr -> bool
val approx_equal_scalar : ?eps:float -> arr -> float -> bool
val approx_elt_equal : ?eps:float -> arr -> arr -> arr
val approx_elt_equal_scalar : ?eps:float -> arr -> float -> arr
val dot : arr -> arr -> arr
val trace : arr -> elt
Helper functions
val float_to_elt : float -> elt
val elt_to_float : elt -> float
include Owl_base_dense_ndarray_intf.NN with type arr := arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
include module type of struct include Owl_base_dense_ndarray.Operator end
include sig ... end
val (+$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (-$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (*$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (/$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($+) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($-) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($*) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($/) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (!=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (<>) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (>) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (<) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (>=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (<=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
include sig ... end
val (.%{}) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int -> 'a
val (.%{;..}) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int array -> 'a
val (.%{}<-) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int -> 'a -> unit
val (.%{;..}<-) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int array -> 'a -> unit
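
A short, hypothetical illustration of the scalar and indexing operators included above, assuming the module path below; it is a sketch, not part of this interface.

(* Hypothetical usage of the operators above. *)
module S = Owl_base_algodiff_primal_ops.S

let () =
  let x = S.ones [| 2; 2 |] in
  let y = S.(x *$ 3.) in                 (* multiply every element by 3.0 *)
  Printf.printf "%g\n" S.(y.%{0; 1})     (* read the element at index (0, 1) *)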
module Scalar = Owl_base_maths
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_base_complex/.dummy b/owl-base/Owl_base_complex/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_dense_common/.dummy b/owl-base/Owl_base_dense_common/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_dense_matrix_c/.dummy b/owl-base/Owl_base_dense_matrix_c/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_dense_matrix_d/.dummy b/owl-base/Owl_base_dense_matrix_d/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_dense_matrix_generic/.dummy b/owl-base/Owl_base_dense_matrix_generic/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_dense_matrix_intf/.dummy b/owl-base/Owl_base_dense_matrix_intf/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_dense_matrix_intf/module-type-Common/index.html b/owl-base/Owl_base_dense_matrix_intf/module-type-Common/index.html deleted file mode 100644 index 5e5bcfe63..000000000 --- a/owl-base/Owl_base_dense_matrix_intf/module-type-Common/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Common (owl-base.Owl_base_dense_matrix_intf.Common)

Module type Owl_base_dense_matrix_intf.Common

type elt
type arr
val diagm : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_base_dense_matrix_s/.dummy b/owl-base/Owl_base_dense_matrix_s/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_dense_matrix_z/.dummy b/owl-base/Owl_base_dense_matrix_z/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_dense_ndarray/.dummy b/owl-base/Owl_base_dense_ndarray/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_dense_ndarray/C/index.html b/owl-base/Owl_base_dense_ndarray/C/index.html deleted file mode 100644 index aafcc9f85..000000000 --- a/owl-base/Owl_base_dense_ndarray/C/index.html +++ /dev/null @@ -1,73 +0,0 @@ - -C (owl-base.Owl_base_dense_ndarray.C)

Module Owl_base_dense_ndarray.C

include module type of struct include Owl_base_dense_ndarray_c end
type elt = Stdlib.Complex.t
type arr = (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
include Owl_base_dense_ndarray_intf.Common with type arr := arr and type elt := elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:float -> int array -> arr
val shape : arr -> int array
val numel : arr -> int
val strides : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val slice_size : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val flatten : arr -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val squeeze : ?axis:int array -> arr -> arr
val expand : ?hi:bool -> arr -> int -> arr
val split : ?axis:int -> int array -> arr -> arr array
val draw : ?axis:int -> arr -> int -> arr * int array
val pad : ?v:elt -> int list list -> arr -> arr
val one_hot : int -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
Iterate array elements
val iteri : (int -> elt -> unit) -> arr -> unit
val iter : (elt -> unit) -> arr -> unit
val mapi : (int -> elt -> elt) -> arr -> arr
val map : (elt -> elt) -> arr -> arr
val filteri : (int -> elt -> bool) -> arr -> int array
val filter : (elt -> bool) -> arr -> int array
val foldi : ?axis:int -> (int -> elt -> elt -> elt) -> elt -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scani : ?axis:int -> (int -> elt -> elt -> elt) -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
Examination & Comparison
val exists : (elt -> bool) -> arr -> bool
val not_exists : (elt -> bool) -> arr -> bool
val for_all : (elt -> bool) -> arr -> bool
val is_zero : arr -> bool
val is_positive : arr -> bool
val is_negative : arr -> bool
val is_nonpositive : arr -> bool
val is_nonnegative : arr -> bool
val is_normal : arr -> bool
val not_nan : arr -> bool
val not_inf : arr -> bool
val equal : arr -> arr -> bool
val not_equal : arr -> arr -> bool
val greater : arr -> arr -> bool
val less : arr -> arr -> bool
val greater_equal : arr -> arr -> bool
val less_equal : arr -> arr -> bool
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val equal_scalar : arr -> elt -> bool
val not_equal_scalar : arr -> elt -> bool
val less_scalar : arr -> elt -> bool
val greater_scalar : arr -> elt -> bool
val less_equal_scalar : arr -> elt -> bool
val greater_equal_scalar : arr -> elt -> bool
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val diag : ?k:int -> arr -> arr
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
include module type of struct include Operator end
include sig ... end
val (+$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (-$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (*$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (/$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($+) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($-) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($*) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($/) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (!=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (<>) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (>) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (<) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (>=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (<=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
include sig ... end
val (.%{}) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int -> 'a
val (.%{;..}) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int array -> 'a
val (.%{}<-) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int -> 'a -> unit
val (.%{;..}<-) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int array -> 'a -> unit
\ No newline at end of file diff --git a/owl-base/Owl_base_dense_ndarray/D/index.html b/owl-base/Owl_base_dense_ndarray/D/index.html deleted file mode 100644 index f56f46071..000000000 --- a/owl-base/Owl_base_dense_ndarray/D/index.html +++ /dev/null @@ -1,225 +0,0 @@ - -D (owl-base.Owl_base_dense_ndarray.D)

Module Owl_base_dense_ndarray.D

include module type of struct include Owl_base_dense_ndarray_d end
type elt = float
type arr = (float, Stdlib.Bigarray.float64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
include Owl_base_dense_ndarray_intf.Common with type arr := arr and type elt := elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:float -> int array -> arr
val shape : arr -> int array
val numel : arr -> int
val strides : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val slice_size : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val flatten : arr -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val squeeze : ?axis:int array -> arr -> arr
val expand : ?hi:bool -> arr -> int -> arr
val split : ?axis:int -> int array -> arr -> arr array
val draw : ?axis:int -> arr -> int -> arr * int array
val pad : ?v:elt -> int list list -> arr -> arr
val one_hot : int -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
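
For orientation, here is a minimal usage sketch of the creation and arithmetic functions listed above. It is not part of the original page, and it assumes the conventional defaults (start 0, unit step) for sequential.

(* Minimal sketch, not from the original docs; defaults of sequential are assumed. *)
let () =
  let open Owl_base_dense_ndarray.D in
  let x = sequential ~a:1. [| 2; 3 |] in    (* assumed values 1. .. 6. *)
  let y = add_scalar (sqr x) 0.5 in         (* element-wise x^2 + 0.5 *)
  Printf.printf "sum = %g\n" (sum' y)
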
Iterate array elements
val iteri : (int -> elt -> unit) -> arr -> unit
val iter : (elt -> unit) -> arr -> unit
val mapi : (int -> elt -> elt) -> arr -> arr
val map : (elt -> elt) -> arr -> arr
val filteri : (int -> elt -> bool) -> arr -> int array
val filter : (elt -> bool) -> arr -> int array
val foldi : ?axis:int -> (int -> elt -> elt -> elt) -> elt -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scani : ?axis:int -> (int -> elt -> elt -> elt) -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
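
A short sketch of map and fold in use (again not from the original page); the exact behaviour of fold along an axis is assumed from its signature.

(* Sketch only: the axis-wise behaviour of fold is assumed from the signature. *)
let () =
  let open Owl_base_dense_ndarray.D in
  let x = ones [| 3; 4 |] in
  let y = map (fun a -> (2. *. a) +. 1.) x in   (* every element becomes 3. *)
  let s = fold ~axis:1 ( +. ) 0. y in           (* assumed: per-row sums *)
  print s
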
Examination & Comparison
val exists : (elt -> bool) -> arr -> bool
val not_exists : (elt -> bool) -> arr -> bool
val for_all : (elt -> bool) -> arr -> bool
val is_zero : arr -> bool
val is_positive : arr -> bool
val is_negative : arr -> bool
val is_nonpositive : arr -> bool
val is_nonnegative : arr -> bool
val is_normal : arr -> bool
val not_nan : arr -> bool
val not_inf : arr -> bool
val equal : arr -> arr -> bool
val not_equal : arr -> arr -> bool
val greater : arr -> arr -> bool
val less : arr -> arr -> bool
val greater_equal : arr -> arr -> bool
val less_equal : arr -> arr -> bool
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val equal_scalar : arr -> elt -> bool
val not_equal_scalar : arr -> elt -> bool
val less_scalar : arr -> elt -> bool
val greater_scalar : arr -> elt -> bool
val less_equal_scalar : arr -> elt -> bool
val greater_equal_scalar : arr -> elt -> bool
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val diag : ?k:int -> arr -> arr
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
include Owl_base_dense_ndarray_intf.Real with type arr := arr and type elt := elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_slices : ?axis:int -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val approx_equal : ?eps:float -> arr -> arr -> bool
val approx_equal_scalar : ?eps:float -> arr -> float -> bool
val approx_elt_equal : ?eps:float -> arr -> arr -> arr
val approx_elt_equal_scalar : ?eps:float -> arr -> float -> arr
val dot : arr -> arr -> arr
val trace : arr -> elt
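
A hedged sketch of the matrix-style helpers dot and trace on 2-d ndarrays; the [|rows; cols|] reading of the shape is an assumption of this example.

(* Sketch: 2-d ndarrays used as matrices; shapes are assumed to be [|rows; cols|]. *)
let () =
  let open Owl_base_dense_ndarray.D in
  let a = sequential [| 2; 3 |] in
  let b = ones [| 3; 2 |] in
  let c = dot a b in                         (* (2x3) x (3x2) = 2x2 *)
  Printf.printf "trace c = %g\n" (trace c)
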
Helper functions
val float_to_elt : float -> elt
val elt_to_float : elt -> float
include Owl_base_dense_ndarray_intf.NN with type arr := arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
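
A sketch of the neural-network helpers above. The [|batch; rows; cols; channels|] input layout, the [|rows; cols; in_channels; out_channels|] kernel layout and the SAME constructor of Owl_types_common.padding are assumptions of this example, not statements taken from this page.

(* Sketch only; data layout and the SAME padding constructor are assumptions. *)
let () =
  let open Owl_base_dense_ndarray.D in
  let x = uniform [| 1; 28; 28; 1 |] in                            (* one 28x28 single-channel input, assumed layout *)
  let k = gaussian [| 3; 3; 1; 8 |] in                             (* 3x3 kernel, 1 -> 8 channels, assumed layout *)
  let y = conv2d ~padding:Owl_types_common.SAME x k [| 1; 1 |] in  (* stride 1 in both directions *)
  let z = max_pool2d y [| 2; 2 |] [| 2; 2 |] in                    (* 2x2 window, stride 2 *)
  Array.iter (Printf.printf "%i ") (shape z);
  print_newline ()
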
include module type of struct include Operator end
include sig ... end
val (+$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (-$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (*$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (/$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($+) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($-) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($*) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($/) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (!=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (<>) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (>) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (<) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (>=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (<=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
include sig ... end
val (.%{}) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int -> 'a
val (.%{;..}) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int array -> 'a
val (.%{}<-) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int -> 'a -> unit
val (.%{;..}<-) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int array -> 'a -> unit
\ No newline at end of file diff --git a/owl-base/Owl_base_dense_ndarray/Generic/index.html b/owl-base/Owl_base_dense_ndarray/Generic/index.html deleted file mode 100644 index 62f2ce234..000000000 --- a/owl-base/Owl_base_dense_ndarray/Generic/index.html +++ /dev/null @@ -1,309 +0,0 @@ - -Generic (owl-base.Owl_base_dense_ndarray.Generic)

Module Owl_base_dense_ndarray.Generic

include module type of struct include Owl_base_dense_ndarray_generic end

When comparing two complex numbers x and y, Owl uses the following conventions: 1) x and y are equal iff both their real and imaginary parts are equal; 2) x is less than y if the magnitude of x is less than the magnitude of y; if x and y have the same magnitude, x is less than y if the phase of x is less than the phase of y; 3) the less-or-equal, greater, and greater-or-equal relations are defined on top of these conventions.
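
The ordering described above can be written down directly; the following standalone sketch (plain Stdlib.Complex, not Owl code) mirrors the three rules.

(* Standalone sketch of the convention above, using Stdlib.Complex only. *)
let complex_compare (x : Complex.t) (y : Complex.t) : int =
  if x.re = y.re && x.im = y.im then 0                  (* rule 1: equal iff both parts equal *)
  else
    let mx = Complex.norm x and my = Complex.norm y in
    if mx <> my then compare mx my                      (* rule 2: order by magnitude *)
    else compare (Complex.arg x) (Complex.arg y)        (* rule 2, tie broken by phase *)
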

Type definition
type ('a, 'b) t = ('a, 'b, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t

Refer to :doc:`owl_dense_ndarray_generic`

type ('a, 'b) kind = ('a, 'b) Stdlib.Bigarray.kind

Refer to :doc:`owl_dense_ndarray_generic`

Create Ndarrays
val empty : ('a, 'b) kind -> int array -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val create : ('a, 'b) kind -> int array -> 'a -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val init : ('a, 'b) kind -> int array -> (int -> 'a) -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val init_nd : ('a, 'b) kind -> int array -> (int array -> 'a) -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val zeros : ('a, 'b) kind -> int array -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val ones : ('a, 'b) kind -> int array -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val eye : ('a, 'b) kind -> int -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val uniform : ('a, 'b) kind -> ?a:'a -> ?b:'a -> int array -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val gaussian : ('a, 'b) kind -> ?mu:'a -> ?sigma:'a -> int array -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val sequential : ('a, 'b) kind -> ?a:'a -> ?step:'a -> int array -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val bernoulli : ('a, 'b) kind -> ?p:float -> int array -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

Obtain basic properties
val shape : ('a, 'b) t -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val num_dims : ('a, 'b) t -> int

Refer to :doc:`owl_dense_ndarray_generic`

val nth_dim : ('a, 'b) t -> int -> int

Refer to :doc:`owl_dense_ndarray_generic`

val numel : ('a, 'b) t -> int

Refer to :doc:`owl_dense_ndarray_generic`

val kind : ('a, 'b) t -> ('a, 'b) kind

Refer to :doc:`owl_dense_ndarray_generic`

val strides : ('a, 'b) t -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val slice_size : ('a, 'b) t -> int array

Refer to :doc:`owl_dense_ndarray_generic`

Manipulate Ndarrays
val get : ('a, 'b) t -> int array -> 'a

Refer to :doc:`owl_dense_ndarray_generic`

val set : ('a, 'b) t -> int array -> 'a -> unit

Refer to :doc:`owl_dense_ndarray_generic`

val get_slice : int list list -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val set_slice : int list list -> ('a, 'b) t -> ('a, 'b) t -> unit

Refer to :doc:`owl_dense_ndarray_generic`

val get_fancy : Owl_types.index list -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val set_fancy : Owl_types.index list -> ('a, 'b) t -> ('a, 'b) t -> unit

Refer to :doc:`owl_dense_ndarray_generic`
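
A sketch of basic slicing with get_slice and set_slice. The reading of each inner list as [start; stop] and of [] as the whole axis is an assumption of this example rather than something stated on this page.

(* Sketch only: the [start; stop] / [] slice notation is assumed. *)
let () =
  let open Owl_base_dense_ndarray.Generic in
  let x = sequential Stdlib.Bigarray.float64 [| 4; 5 |] in
  let top = get_slice [ [ 0; 1 ]; [] ] x in                        (* assumed: rows 0..1, all columns *)
  set_slice [ [ 0; 1 ]; [] ] x (zeros Stdlib.Bigarray.float64 (shape top));
  print x
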

val reset : ('a, 'b) t -> unit

Refer to :doc:`owl_dense_ndarray_generic`

val fill : ('a, 'b) t -> 'a -> unit

Refer to :doc:`owl_dense_ndarray_generic`

val copy : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val copy_ : out:('a, 'b) t -> ('a, 'b) t -> unit

Refer to :doc:`owl_dense_ndarray_generic`

val reshape : ('a, 'b) t -> int array -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val flatten : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val reverse : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose : ?axis:int array -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val tile : ('a, 'b) t -> int array -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val repeat : ('a, 'b) t -> int array -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val pad : ?v:'a -> int list list -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val concatenate : ?axis:int -> ('a, 'b) t array -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val stack : ?axis:int -> ('a, 'b) t array -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val squeeze : ?axis:int array -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val expand : ?hi:bool -> ('a, 'b) t -> int -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val split : ?axis:int -> int array -> ('a, 'b) t -> ('a, 'b) t array

Refer to :doc:`owl_dense_ndarray_generic`

val draw : ?axis:int -> ('a, 'b) t -> int -> ('a, 'b) t * int array

Refer to :doc:`owl_dense_ndarray_generic`

val one_hot : int -> ('a, 'b) t -> ('a, 'b) t

TODO: not implemented

Iterate array elements
val iteri : (int -> 'a -> unit) -> ('a, 'b) t -> unit

Refer to :doc:`owl_dense_ndarray_generic`

val iter : ('a -> unit) -> ('a, 'b) t -> unit

Refer to :doc:`owl_dense_ndarray_generic`

val mapi : (int -> 'a -> 'a) -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val map : ('a -> 'a) -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val filteri : (int -> 'a -> bool) -> ('a, 'b) t -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val filter : ('a -> bool) -> ('a, 'b) t -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val foldi : ?axis:int -> (int -> 'a -> 'a -> 'a) -> 'a -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val fold : ?axis:int -> ('a -> 'a -> 'a) -> 'a -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val scani : ?axis:int -> (int -> 'a -> 'a -> 'a) -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val scan : ?axis:int -> ('a -> 'a -> 'a) -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

Examination & Comparison
val exists : ('a -> bool) -> ('a, 'b) t -> bool

Refer to :doc:`owl_dense_ndarray_generic`

val not_exists : ('a -> bool) -> ('a, 'b) t -> bool

Refer to :doc:`owl_dense_ndarray_generic`

val for_all : ('a -> bool) -> ('a, 'b) t -> bool

Refer to :doc:`owl_dense_ndarray_generic`

val is_zero : ('a, 'b) t -> bool

Refer to :doc:`owl_dense_ndarray_generic`

val is_positive : ('a, 'b) t -> bool

Refer to :doc:`owl_dense_ndarray_generic`

val is_negative : ('a, 'b) t -> bool

Refer to :doc:`owl_dense_ndarray_generic`

val is_nonpositive : ('a, 'b) t -> bool

Refer to :doc:`owl_dense_ndarray_generic`

val is_nonnegative : ('a, 'b) t -> bool

Refer to :doc:`owl_dense_ndarray_generic`

val is_normal : ('a, 'b) t -> bool

Refer to :doc:`owl_dense_ndarray_generic`

val not_nan : ('a, 'b) t -> bool

Refer to :doc:`owl_dense_ndarray_generic`

val not_inf : ('a, 'b) t -> bool

Refer to :doc:`owl_dense_ndarray_generic`

val equal : ('a, 'b) t -> ('a, 'b) t -> bool

Refer to :doc:`owl_dense_ndarray_generic`

val not_equal : ('a, 'b) t -> ('a, 'b) t -> bool

Refer to :doc:`owl_dense_ndarray_generic`

val greater : ('a, 'b) t -> ('a, 'b) t -> bool

Refer to :doc:`owl_dense_ndarray_generic`

val less : ('a, 'b) t -> ('a, 'b) t -> bool

Refer to :doc:`owl_dense_ndarray_generic`

val greater_equal : ('a, 'b) t -> ('a, 'b) t -> bool

Refer to :doc:`owl_dense_ndarray_generic`

val less_equal : ('a, 'b) t -> ('a, 'b) t -> bool

Refer to :doc:`owl_dense_ndarray_generic`

val elt_equal : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val elt_not_equal : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val elt_less : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val elt_greater : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val elt_less_equal : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val elt_greater_equal : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val equal_scalar : ('a, 'b) t -> 'a -> bool

Refer to :doc:`owl_dense_ndarray_generic`

val not_equal_scalar : ('a, 'b) t -> 'a -> bool

Refer to :doc:`owl_dense_ndarray_generic`

val less_scalar : ('a, 'b) t -> 'a -> bool

Refer to :doc:`owl_dense_ndarray_generic`

val greater_scalar : ('a, 'b) t -> 'a -> bool

Refer to :doc:`owl_dense_ndarray_generic`

val less_equal_scalar : ('a, 'b) t -> 'a -> bool

Refer to :doc:`owl_dense_ndarray_generic`

val greater_equal_scalar : ('a, 'b) t -> 'a -> bool

Refer to :doc:`owl_dense_ndarray_generic`

val elt_equal_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val elt_not_equal_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val elt_less_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val elt_greater_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val elt_less_equal_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val elt_greater_equal_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val approx_equal : ?eps:float -> (float, 'b) t -> (float, 'b) t -> bool

Refer to :doc:`owl_dense_ndarray_generic`

val approx_equal_scalar : ?eps:float -> (float, 'b) t -> float -> bool

Refer to :doc:`owl_dense_ndarray_generic`

val approx_elt_equal : ?eps:float -> (float, 'b) t -> (float, 'b) t -> (float, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val approx_elt_equal_scalar : ?eps:float -> (float, 'b) t -> float -> (float, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

Input/Output functions
val of_array : ('a, 'b) kind -> 'a array -> int array -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:('a -> string) -> ('a, 'b) t -> unit

Refer to :doc:`owl_dense_ndarray_generic`

val load : ('a, 'b) kind -> string -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

Unary math operators
val min : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val max : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val sum : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val sum' : ('a, 'b) t -> 'a

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp' : ('a, 'b) t -> 'a

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val sum_reduce : ?axis:int array -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val min' : ('a, 'b) t -> 'a

Refer to :doc:`owl_dense_ndarray_generic`

val max' : ('a, 'b) t -> 'a

Refer to :doc:`owl_dense_ndarray_generic`

val abs : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val conj : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val neg : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val reci : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val signum : (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val sqr : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val sqrt : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val cbrt : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val exp : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val exp2 : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val exp10 : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val expm1 : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val log : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val log2 : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val log10 : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val log1p : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val sin : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val cos : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val tan : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val asin : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val acos : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val atan : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val sinh : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val cosh : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val tanh : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val asinh : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val acosh : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val atanh : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val floor : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val ceil : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val round : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val trunc : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val fix : ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val erf : (float, 'b) t -> (float, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val erfc : (float, 'b) t -> (float, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val relu : (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val dawsn : (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val softsign : (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val softplus : (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val sigmoid : (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val softmax : ?axis:int -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val l1norm' : (float, 'b) t -> float

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm' : (float, 'b) t -> float

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm_sqr' : (float, 'b) t -> float

Refer to :doc:`owl_dense_ndarray_generic`

Binary math operators
val add : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val sub : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val mul : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val div : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val add_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val sub_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val mul_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val div_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val scalar_add : 'a -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val scalar_sub : 'a -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val scalar_mul : 'a -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val scalar_div : 'a -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val pow : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val scalar_pow : 'a -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val pow_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val atan2 : (float, 'a) t -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val scalar_atan2 : float -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val atan2_scalar : (float, 'a) t -> float -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val min2 : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val max2 : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val fmod : (float, 'a) t -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val fmod_scalar : (float, 'a) t -> float -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val scalar_fmod : float -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val clip_by_value : ?amin:float -> ?amax:float -> (float, 'b) t -> (float, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val clip_by_l2norm : float -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val fma : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

val conv1d : ?padding:Owl_types.padding -> (float, 'a) t -> (float, 'a) t -> int array -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val conv2d : ?padding:Owl_types.padding -> (float, 'a) t -> (float, 'a) t -> int array -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val conv3d : ?padding:Owl_types.padding -> (float, 'a) t -> (float, 'a) t -> int array -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv1d : ?padding:Owl_types.padding -> (float, 'a) t -> (float, 'a) t -> int array -> int array -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv2d : ?padding:Owl_types.padding -> (float, 'a) t -> (float, 'a) t -> int array -> int array -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv3d : ?padding:Owl_types.padding -> (float, 'a) t -> (float, 'a) t -> int array -> int array -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv1d : ?padding:Owl_types.padding -> (float, 'a) t -> (float, 'a) t -> int array -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv2d : ?padding:Owl_types.padding -> (float, 'a) t -> (float, 'a) t -> int array -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv3d : ?padding:Owl_types.padding -> (float, 'a) t -> (float, 'a) t -> int array -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool1d : ?padding:Owl_types.padding -> (float, 'a) t -> int array -> int array -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool2d : ?padding:Owl_types.padding -> (float, 'a) t -> int array -> int array -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool3d : ?padding:Owl_types.padding -> (float, 'a) t -> int array -> int array -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool1d : ?padding:Owl_types.padding -> (float, 'a) t -> int array -> int array -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool2d : ?padding:Owl_types.padding -> (float, 'a) t -> int array -> int array -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool3d : ?padding:Owl_types.padding -> (float, 'a) t -> int array -> int array -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val upsampling2d : (float, 'a) t -> int array -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val conv1d_backward_input : (float, 'a) t -> (float, 'a) t -> int array -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val conv1d_backward_kernel : (float, 'a) t -> (float, 'a) t -> int array -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val conv2d_backward_input : (float, 'a) t -> (float, 'a) t -> int array -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val conv2d_backward_kernel : (float, 'a) t -> (float, 'a) t -> int array -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val conv3d_backward_input : (float, 'a) t -> (float, 'a) t -> int array -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val conv3d_backward_kernel : (float, 'a) t -> (float, 'a) t -> int array -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv1d_backward_input : (float, 'a) t -> (float, 'a) t -> int array -> int array -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv1d_backward_kernel : (float, 'a) t -> (float, 'a) t -> int array -> int array -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv2d_backward_input : (float, 'a) t -> (float, 'a) t -> int array -> int array -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv2d_backward_kernel : (float, 'a) t -> (float, 'a) t -> int array -> int array -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv3d_backward_input : (float, 'a) t -> (float, 'a) t -> int array -> int array -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv3d_backward_kernel : (float, 'a) t -> (float, 'a) t -> int array -> int array -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv1d_backward_input : (float, 'a) t -> (float, 'a) t -> int array -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv1d_backward_kernel : (float, 'a) t -> (float, 'a) t -> int array -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv2d_backward_input : (float, 'a) t -> (float, 'a) t -> int array -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv2d_backward_kernel : (float, 'a) t -> (float, 'a) t -> int array -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv3d_backward_input : (float, 'a) t -> (float, 'a) t -> int array -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv3d_backward_kernel : (float, 'a) t -> (float, 'a) t -> int array -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool1d_backward : Owl_types.padding -> (float, 'a) t -> int array -> int array -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool2d_backward : Owl_types.padding -> (float, 'a) t -> int array -> int array -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool3d_backward : Owl_types.padding -> (float, 'a) t -> int array -> int array -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool1d_backward : Owl_types.padding -> (float, 'a) t -> int array -> int array -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool2d_backward : Owl_types.padding -> (float, 'a) t -> int array -> int array -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool3d_backward : Owl_types.padding -> (float, 'a) t -> int array -> int array -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

val upsampling2d_backward : (float, 'a) t -> int array -> (float, 'a) t -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`

Helper functions
val sum_slices : ?axis:int -> (float, 'b) t -> (float, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`

Matrix functions
val row_num : ('a, 'b) t -> int

Refer to :doc:`owl_dense_matrix_generic`

val col_num : ('a, 'b) t -> int

Refer to :doc:`owl_dense_matrix_generic`

val row : ('a, 'b) t -> int -> ('a, 'b) t

Refer to :doc:`owl_dense_matrix_generic`

val rows : ('a, 'b) t -> int array -> ('a, 'b) t

Refer to :doc:`owl_dense_matrix_generic`

val copy_row_to : ('a, 'b) t -> ('a, 'b) t -> int -> unit

Refer to :doc:`owl_dense_matrix_generic`

val copy_col_to : ('a, 'b) t -> ('a, 'b) t -> int -> unit

Refer to :doc:`owl_dense_matrix_generic`

val dot : (float, 'b) t -> (float, 'b) t -> (float, 'b) t

Refer to :doc:`owl_dense_matrix_generic`

val diag : ?k:int -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_matrix_generic`

val trace : (float, 'b) t -> float

Refer to :doc:`owl_dense_matrix_generic`

val to_rows : ('a, 'b) t -> ('a, 'b) t array

Refer to :doc:`owl_dense_matrix_generic`

val of_rows : ('a, 'b) t array -> ('a, 'b) t

Refer to :doc:`owl_dense_matrix_generic`

val to_cols : ('a, 'b) t -> ('a, 'b) t array

TODO

val of_cols : ('a, 'b) t array -> ('a, 'b) t

TODO

val of_arrays : ('a, 'b) kind -> 'a array array -> ('a, 'b) t

Refer to :doc:`owl_dense_matrix_generic`

val draw_rows : ?replacement:bool -> ('a, 'b) t -> int -> ('a, 'b) t * int array

Refer to :doc:`owl_dense_matrix_generic`

val draw_rows2 : ?replacement:bool -> ('a, 'b) t -> ('a, 'b) t -> int -> ('a, 'b) t * ('a, 'b) t * int array

Refer to :doc:`owl_dense_matrix_generic`
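
A sketch of draw_rows; that ~replacement:false means sampling without replacement is an assumption of this example.

(* Sketch: sample three rows from a 10x4 ndarray; semantics of ~replacement assumed. *)
let () =
  let open Owl_base_dense_ndarray.Generic in
  let x = sequential Stdlib.Bigarray.float64 [| 10; 4 |] in
  let sample, idx = draw_rows ~replacement:false x 3 in
  print sample;
  Array.iter (Printf.printf "picked row %i\n") idx
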

Helper functions
val float_to_elt : 'a -> 'a

Identity function to deal with the type conversion required by other functors.

val elt_to_float : 'a -> 'a

Identity function to deal with the type conversion required by other functors.
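
Since both helpers are identities here, a sketch is almost trivial, but it makes the point that no numeric conversion takes place.

(* Sketch: in Generic these helpers are identities, so the value passes through unchanged. *)
let () =
  let e = Owl_base_dense_ndarray.Generic.float_to_elt 3.14 in
  assert (Owl_base_dense_ndarray.Generic.elt_to_float e = 3.14)
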

include module type of struct include Operator end
include sig ... end
val (+$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (-$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (*$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (/$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($+) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($-) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($*) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($/) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (!=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (<>) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (>) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (<) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (>=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (<=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
include sig ... end
val (.%{}) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int -> 'a
val (.%{;..}) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int array -> 'a
val (.%{}<-) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int -> 'a -> unit
val (.%{;..}<-) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int array -> 'a -> unit
\ No newline at end of file diff --git a/owl-base/Owl_base_dense_ndarray/Operator/index.html b/owl-base/Owl_base_dense_ndarray/Operator/index.html deleted file mode 100644 index 190764b93..000000000 --- a/owl-base/Owl_base_dense_ndarray/Operator/index.html +++ /dev/null @@ -1,63 +0,0 @@ - -Operator (owl-base.Owl_base_dense_ndarray.Operator)

Module Owl_base_dense_ndarray.Operator

include sig ... end
val (+$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (-$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (*$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (/$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($+) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($-) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($*) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($/) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (!=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (<>) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (>) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (<) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (>=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (<=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
include sig ... end
val (.%{}) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int -> 'a
val (.%{;..}) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int array -> 'a
val (.%{}<-) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int -> 'a -> unit
val (.%{;..}<-) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int array -> 'a -> unit
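
A sketch of these operators in use. Two assumptions: the boolean operators such as (>) hold only when the relation holds element-wise over the whole array, and the a.%{i; j} syntax (OCaml's extended indexing operators) dispatches to (.%{;..}) above.

(* Sketch only; element-wise semantics of (>) and the a.%{i; j} syntax are assumed. *)
let () =
  let open Owl_base_dense_ndarray.Generic in
  let open Owl_base_dense_ndarray.Operator in
  let x = ones Stdlib.Bigarray.float64 [| 2; 2 |] in
  let y = (x *$ 3.) +$ 1. in                     (* scalar multiply, then scalar add *)
  Printf.printf "y.(0,0) = %g\n" y.%{0; 0};      (* multi-index access via (.%{;..}) *)
  assert (y > x)
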
\ No newline at end of file diff --git a/owl-base/Owl_base_dense_ndarray/S/index.html b/owl-base/Owl_base_dense_ndarray/S/index.html deleted file mode 100644 index ae7835afa..000000000 --- a/owl-base/Owl_base_dense_ndarray/S/index.html +++ /dev/null @@ -1,225 +0,0 @@ - -S (owl-base.Owl_base_dense_ndarray.S)

Module Owl_base_dense_ndarray.S

include module type of struct include Owl_base_dense_ndarray_s end
type elt = float
type arr = (float, Stdlib.Bigarray.float32_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
include Owl_base_dense_ndarray_intf.Common with type arr := arr and type elt := elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:float -> int array -> arr
val shape : arr -> int array
val numel : arr -> int
val strides : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val slice_size : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val flatten : arr -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val squeeze : ?axis:int array -> arr -> arr
val expand : ?hi:bool -> arr -> int -> arr
val split : ?axis:int -> int array -> arr -> arr array
val draw : ?axis:int -> arr -> int -> arr * int array
val pad : ?v:elt -> int list list -> arr -> arr
val one_hot : int -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
Iterate array elements
val iteri : (int -> elt -> unit) -> arr -> unit
val iter : (elt -> unit) -> arr -> unit
val mapi : (int -> elt -> elt) -> arr -> arr
val map : (elt -> elt) -> arr -> arr
val filteri : (int -> elt -> bool) -> arr -> int array
val filter : (elt -> bool) -> arr -> int array
val foldi : ?axis:int -> (int -> elt -> elt -> elt) -> elt -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scani : ?axis:int -> (int -> elt -> elt -> elt) -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
Examination & Comparison
val exists : (elt -> bool) -> arr -> bool
val not_exists : (elt -> bool) -> arr -> bool
val for_all : (elt -> bool) -> arr -> bool
val is_zero : arr -> bool
val is_positive : arr -> bool
val is_negative : arr -> bool
val is_nonpositive : arr -> bool
val is_nonnegative : arr -> bool
val is_normal : arr -> bool
val not_nan : arr -> bool
val not_inf : arr -> bool
val equal : arr -> arr -> bool
val not_equal : arr -> arr -> bool
val greater : arr -> arr -> bool
val less : arr -> arr -> bool
val greater_equal : arr -> arr -> bool
val less_equal : arr -> arr -> bool
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val equal_scalar : arr -> elt -> bool
val not_equal_scalar : arr -> elt -> bool
val less_scalar : arr -> elt -> bool
val greater_scalar : arr -> elt -> bool
val less_equal_scalar : arr -> elt -> bool
val greater_equal_scalar : arr -> elt -> bool
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val diag : ?k:int -> arr -> arr
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
include Owl_base_dense_ndarray_intf.Real with type arr := arr and type elt := elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_slices : ?axis:int -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val approx_equal : ?eps:float -> arr -> arr -> bool
val approx_equal_scalar : ?eps:float -> arr -> float -> bool
val approx_elt_equal : ?eps:float -> arr -> arr -> arr
val approx_elt_equal_scalar : ?eps:float -> arr -> float -> arr
val dot : arr -> arr -> arr
val trace : arr -> elt
Helper functions
val float_to_elt : float -> elt
val elt_to_float : elt -> float
include Owl_base_dense_ndarray_intf.NN with type arr := arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
include module type of struct include Operator end
include sig ... end
val (+$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (-$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (*$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (/$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($+) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($-) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($*) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($/) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (!=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (<>) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (>) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (<) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (>=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (<=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
include sig ... end
val (.%{}) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int -> 'a
val (.%{;..}) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int array -> 'a
val (.%{}<-) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int -> 'a -> unit
val (.%{;..}<-) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int array -> 'a -> unit
\ No newline at end of file diff --git a/owl-base/Owl_base_dense_ndarray/Z/index.html b/owl-base/Owl_base_dense_ndarray/Z/index.html deleted file mode 100644 index 4b47bfb0c..000000000 --- a/owl-base/Owl_base_dense_ndarray/Z/index.html +++ /dev/null @@ -1,73 +0,0 @@ - -Z (owl-base.Owl_base_dense_ndarray.Z)

Module Owl_base_dense_ndarray.Z

include module type of struct include Owl_base_dense_ndarray_z end
type elt = Stdlib.Complex.t
type arr = (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
include Owl_base_dense_ndarray_intf.Common with type arr := arr and type elt := elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:float -> int array -> arr
val shape : arr -> int array
val numel : arr -> int
val strides : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val slice_size : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val flatten : arr -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val squeeze : ?axis:int array -> arr -> arr
val expand : ?hi:bool -> arr -> int -> arr
val split : ?axis:int -> int array -> arr -> arr array
val draw : ?axis:int -> arr -> int -> arr * int array
val pad : ?v:elt -> int list list -> arr -> arr
val one_hot : int -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
Iterate array elements
val iteri : (int -> elt -> unit) -> arr -> unit
val iter : (elt -> unit) -> arr -> unit
val mapi : (int -> elt -> elt) -> arr -> arr
val map : (elt -> elt) -> arr -> arr
val filteri : (int -> elt -> bool) -> arr -> int array
val filter : (elt -> bool) -> arr -> int array
val foldi : ?axis:int -> (int -> elt -> elt -> elt) -> elt -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scani : ?axis:int -> (int -> elt -> elt -> elt) -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
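
A small sketch of the iteration functions above for this complex module (values are illustrative):

  let () =
    let open Owl_base_dense_ndarray.Z in
    let x = create [|2; 2|] Complex.one in
    (* map builds a new array; iteri only visits elements *)
    let y = map (fun z -> Complex.mul z z) x in
    iteri (fun i z -> Printf.printf "%d: %f+%fi\n" i z.Complex.re z.Complex.im) y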
Examination & Comparison
val exists : (elt -> bool) -> arr -> bool
val not_exists : (elt -> bool) -> arr -> bool
val for_all : (elt -> bool) -> arr -> bool
val is_zero : arr -> bool
val is_positive : arr -> bool
val is_negative : arr -> bool
val is_nonpositive : arr -> bool
val is_nonnegative : arr -> bool
val is_normal : arr -> bool
val not_nan : arr -> bool
val not_inf : arr -> bool
val equal : arr -> arr -> bool
val not_equal : arr -> arr -> bool
val greater : arr -> arr -> bool
val less : arr -> arr -> bool
val greater_equal : arr -> arr -> bool
val less_equal : arr -> arr -> bool
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val equal_scalar : arr -> elt -> bool
val not_equal_scalar : arr -> elt -> bool
val less_scalar : arr -> elt -> bool
val greater_scalar : arr -> elt -> bool
val less_equal_scalar : arr -> elt -> bool
val greater_equal_scalar : arr -> elt -> bool
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val diag : ?k:int -> arr -> arr
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
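
An illustrative sketch of the matrix-flavoured helpers above (the matrix contents are arbitrary):

  let () =
    let open Owl_base_dense_ndarray.Z in
    (* build a 2x2 complex matrix from nested arrays, then transpose it *)
    let m = of_arrays [| [| Complex.one; Complex.zero |];
                         [| Complex.zero; Complex.one |] |] in
    let mt = transpose m in
    Printf.printf "rows=%d cols=%d\n" (row_num mt) (col_num mt)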
include module type of struct include Operator end
include sig ... end
val (+$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (-$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (*$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (/$) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($+) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($-) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($*) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val ($/) : 'a -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t
val (=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (!=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (<>) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (>) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (<) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (>=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
val (<=) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> ('a, 'b) Owl_base_dense_ndarray_generic.t -> bool
include sig ... end
val (.%{}) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int -> 'a
val (.%{;..}) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int array -> 'a
val (.%{}<-) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int -> 'a -> unit
val (.%{;..}<-) : ('a, 'b) Owl_base_dense_ndarray_generic.t -> int array -> 'a -> unit
\ No newline at end of file diff --git a/owl-base/Owl_base_dense_ndarray_c/.dummy b/owl-base/Owl_base_dense_ndarray_c/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_dense_ndarray_d/.dummy b/owl-base/Owl_base_dense_ndarray_d/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_dense_ndarray_generic/.dummy b/owl-base/Owl_base_dense_ndarray_generic/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_dense_ndarray_intf/.dummy b/owl-base/Owl_base_dense_ndarray_intf/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_dense_ndarray_intf/module-type-Common/index.html b/owl-base/Owl_base_dense_ndarray_intf/module-type-Common/index.html deleted file mode 100644 index 061a98b42..000000000 --- a/owl-base/Owl_base_dense_ndarray_intf/module-type-Common/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -Common (owl-base.Owl_base_dense_ndarray_intf.Common)

Module type Owl_base_dense_ndarray_intf.Common

type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:float -> int array -> arr
val shape : arr -> int array
val numel : arr -> int
val strides : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val slice_size : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val flatten : arr -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val squeeze : ?axis:int array -> arr -> arr
val expand : ?hi:bool -> arr -> int -> arr
val split : ?axis:int -> int array -> arr -> arr array
val draw : ?axis:int -> arr -> int -> arr * int array
val pad : ?v:elt -> int list list -> arr -> arr
val one_hot : int -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
Iterate array elements
val iteri : (int -> elt -> unit) -> arr -> unit
val iter : (elt -> unit) -> arr -> unit
val mapi : (int -> elt -> elt) -> arr -> arr
val map : (elt -> elt) -> arr -> arr
val filteri : (int -> elt -> bool) -> arr -> int array
val filter : (elt -> bool) -> arr -> int array
val foldi : ?axis:int -> (int -> elt -> elt -> elt) -> elt -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scani : ?axis:int -> (int -> elt -> elt -> elt) -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
Examination & Comparison
val exists : (elt -> bool) -> arr -> bool
val not_exists : (elt -> bool) -> arr -> bool
val for_all : (elt -> bool) -> arr -> bool
val is_zero : arr -> bool
val is_positive : arr -> bool
val is_negative : arr -> bool
val is_nonpositive : arr -> bool
val is_nonnegative : arr -> bool
val is_normal : arr -> bool
val not_nan : arr -> bool
val not_inf : arr -> bool
val equal : arr -> arr -> bool
val not_equal : arr -> arr -> bool
val greater : arr -> arr -> bool
val less : arr -> arr -> bool
val greater_equal : arr -> arr -> bool
val less_equal : arr -> arr -> bool
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val equal_scalar : arr -> elt -> bool
val not_equal_scalar : arr -> elt -> bool
val less_scalar : arr -> elt -> bool
val greater_scalar : arr -> elt -> bool
val less_equal_scalar : arr -> elt -> bool
val greater_equal_scalar : arr -> elt -> bool
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val diag : ?k:int -> arr -> arr
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
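
Common is the signature shared by every base ndarray element type, so generic code can be written against it rather than a concrete module; a minimal sketch (Describe is a hypothetical functor name):

  module Describe (N : Owl_base_dense_ndarray_intf.Common) = struct
    (* report the shape and whether every element is zero, using only
       what the Common signature provides *)
    let describe x =
      let dims =
        N.shape x |> Array.to_list |> List.map string_of_int |> String.concat "x"
      in
      Printf.printf "shape=%s all_zero=%b\n" dims (N.is_zero x)
  end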
\ No newline at end of file diff --git a/owl-base/Owl_base_dense_ndarray_intf/module-type-NN/index.html b/owl-base/Owl_base_dense_ndarray_intf/module-type-NN/index.html deleted file mode 100644 index 65e3e98e3..000000000 --- a/owl-base/Owl_base_dense_ndarray_intf/module-type-NN/index.html +++ /dev/null @@ -1,152 +0,0 @@ - -NN (owl-base.Owl_base_dense_ndarray_intf.NN)

Module type Owl_base_dense_ndarray_intf.NN

type arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
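
An illustrative call sequence for this NN signature, assuming the single-precision module Owl_base_dense_ndarray.S provides it and the usual Owl layout ([|batch; rows; cols; channels|] inputs, [|kh; kw; in_channels; out_channels|] kernels); the shapes here are examples only:

  let () =
    let open Owl_base_dense_ndarray.S in
    let x = uniform [|1; 28; 28; 1|] in      (* one 28x28 single-channel image *)
    let k = uniform [|3; 3; 1; 8|] in        (* 3x3 kernel mapping 1 -> 8 channels *)
    let y = conv2d ~padding:Owl_types_common.SAME x k [|1; 1|] in
    let p = max_pool2d ~padding:Owl_types_common.VALID y [|2; 2|] [|2; 2|] in
    Array.iter (Printf.printf "%d ") (shape p)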
\ No newline at end of file diff --git a/owl-base/Owl_base_dense_ndarray_intf/module-type-Real/index.html b/owl-base/Owl_base_dense_ndarray_intf/module-type-Real/index.html deleted file mode 100644 index 205e6deca..000000000 --- a/owl-base/Owl_base_dense_ndarray_intf/module-type-Real/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Real (owl-base.Owl_base_dense_ndarray_intf.Real)

Module type Owl_base_dense_ndarray_intf.Real

type elt
type arr
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_slices : ?axis:int -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val approx_equal : ?eps:float -> arr -> arr -> bool
val approx_equal_scalar : ?eps:float -> arr -> float -> bool
val approx_elt_equal : ?eps:float -> arr -> arr -> arr
val approx_elt_equal_scalar : ?eps:float -> arr -> float -> arr
val dot : arr -> arr -> arr
val trace : arr -> elt
Helper functions
val float_to_elt : float -> elt
val elt_to_float : elt -> float
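
A short sketch of these real-only helpers, assuming the double-precision module Owl_base_dense_ndarray.D (where elt = float, so float_to_elt is effectively the identity):

  let () =
    let open Owl_base_dense_ndarray.D in
    let x = gaussian [|3; 3|] in
    (* squash through sigmoid, then clamp into [-1, 1] *)
    let y = clip_by_value ~amin:(float_to_elt (-1.)) ~amax:(float_to_elt 1.) (sigmoid x) in
    Printf.printf "l2 norm = %f\n" (elt_to_float (l2norm' y))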
\ No newline at end of file diff --git a/owl-base/Owl_base_dense_ndarray_s/.dummy b/owl-base/Owl_base_dense_ndarray_s/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_dense_ndarray_z/.dummy b/owl-base/Owl_base_dense_ndarray_z/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_linalg_c/.dummy b/owl-base/Owl_base_linalg_c/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_linalg_d/.dummy b/owl-base/Owl_base_linalg_d/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_linalg_generic/.dummy b/owl-base/Owl_base_linalg_generic/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_linalg_intf/.dummy b/owl-base/Owl_base_linalg_intf/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_linalg_intf/module-type-Common/index.html b/owl-base/Owl_base_linalg_intf/module-type-Common/index.html deleted file mode 100644 index 27e5713d7..000000000 --- a/owl-base/Owl_base_linalg_intf/module-type-Common/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Common (owl-base.Owl_base_linalg_intf.Common)

Module type Owl_base_linalg_intf.Common

type elt
type mat
type complex_mat
type int32_mat
Basic functions
val inv : mat -> mat
val det : mat -> elt
val logdet : mat -> elt
val is_triu : mat -> bool
val is_tril : mat -> bool
val is_symmetric : mat -> bool
val is_diag : mat -> bool
Factorisation
val svd : ?thin:bool -> mat -> mat * mat * mat
val chol : ?upper:bool -> mat -> mat
val qr : ?thin:bool -> ?pivot:bool -> mat -> mat * mat * int32_mat
val lq : ?thin:bool -> mat -> mat * mat
Linear system of equations
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> mat -> mat -> mat
val sylvester : mat -> mat -> mat -> mat
val lyapunov : mat -> mat -> mat
val discrete_lyapunov : ?solver:[ `default | `direct | `bilinear ] -> mat -> mat -> mat
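
A hedged example of the equation solvers above, assuming Owl_base_linalg_d is the double-precision instance of this signature and shares its mat type with Owl_base_dense_ndarray.D; the 2x2 system is deliberately trivial:

  let () =
    let open Owl_base_dense_ndarray.D in
    (* solve a x = b for x rather than forming an explicit inverse *)
    let a = of_arrays [| [| 2.; 0. |]; [| 0.; 4. |] |] in
    let b = of_arrays [| [| 2. |]; [| 8. |] |] in
    let x = Owl_base_linalg_d.linsolve a b in
    print x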
\ No newline at end of file diff --git a/owl-base/Owl_base_linalg_intf/module-type-Real/index.html b/owl-base/Owl_base_linalg_intf/module-type-Real/index.html deleted file mode 100644 index 245391651..000000000 --- a/owl-base/Owl_base_linalg_intf/module-type-Real/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Real (owl-base.Owl_base_linalg_intf.Real)

Module type Owl_base_linalg_intf.Real

type elt
type mat
val care : ?diag_r:bool -> mat -> mat -> mat -> mat -> mat
val dare : ?diag_r:bool -> mat -> mat -> mat -> mat -> mat
\ No newline at end of file diff --git a/owl-base/Owl_base_linalg_s/.dummy b/owl-base/Owl_base_linalg_s/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_linalg_z/.dummy b/owl-base/Owl_base_linalg_z/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_maths/.dummy b/owl-base/Owl_base_maths/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_slicing/.dummy b/owl-base/Owl_base_slicing/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_stats/.dummy b/owl-base/Owl_base_stats/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_stats_dist_bernoulli/.dummy b/owl-base/Owl_base_stats_dist_bernoulli/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_stats_dist_cauchy/.dummy b/owl-base/Owl_base_stats_dist_cauchy/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_stats_dist_exponential/.dummy b/owl-base/Owl_base_stats_dist_exponential/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_stats_dist_gamma/.dummy b/owl-base/Owl_base_stats_dist_gamma/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_stats_dist_gaussian/.dummy b/owl-base/Owl_base_stats_dist_gaussian/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_stats_dist_gumbel1/.dummy b/owl-base/Owl_base_stats_dist_gumbel1/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_stats_dist_gumbel2/.dummy b/owl-base/Owl_base_stats_dist_gumbel2/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_stats_dist_uniform/.dummy b/owl-base/Owl_base_stats_dist_uniform/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_base_stats_prng/.dummy b/owl-base/Owl_base_stats_prng/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_computation/.dummy b/owl-base/Owl_computation/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_computation_cpu_device/.dummy b/owl-base/Owl_computation_cpu_device/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_computation_cpu_device/Make/argument-1-A/Linalg/index.html b/owl-base/Owl_computation_cpu_device/Make/argument-1-A/Linalg/index.html deleted file mode 100644 index 05572e712..000000000 --- a/owl-base/Owl_computation_cpu_device/Make/argument-1-A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_cpu_device.Make.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_device/Make/argument-1-A/Mat/index.html b/owl-base/Owl_computation_cpu_device/Make/argument-1-A/Mat/index.html deleted file mode 100644 index a7fbfd8cf..000000000 --- a/owl-base/Owl_computation_cpu_device/Make/argument-1-A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_cpu_device.Make.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
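
A tiny sketch of these matrix constructors, written against an instantiation A of this functor parameter (names are illustrative):

  (* identity matrix, then its strict upper triangle *)
  let upper_id () =
    let id3 = A.Mat.eye 3 in
    A.Mat.triu ~k:1 id3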
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_device/Make/argument-1-A/Scalar/index.html b/owl-base/Owl_computation_cpu_device/Make/argument-1-A/Scalar/index.html deleted file mode 100644 index bde3a222b..000000000 --- a/owl-base/Owl_computation_cpu_device/Make/argument-1-A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_cpu_device.Make.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
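
These scalar functions mirror the array versions element-wise; a small sketch against an instantiation A of this parameter, with float_to_elt lifting the literals into the abstract elt type:

  (* logistic of -1.5, computed entirely in scalar (elt) space *)
  let s = A.Scalar.sigmoid (A.Scalar.add (A.float_to_elt (-2.0)) (A.float_to_elt 0.5))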
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_device/Make/argument-1-A/index.html b/owl-base/Owl_computation_cpu_device/Make/argument-1-A/index.html deleted file mode 100644 index 1a66aa359..000000000 --- a/owl-base/Owl_computation_cpu_device/Make/argument-1-A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_cpu_device.Make.A)

Parameter Make.A

include Owl_types_ndarray_mutable.Sig
include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
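
Most of the trailing functions are in-place variants that write into an existing buffer through ~out or ?out; a hedged sketch written as a functor over Owl_types_ndarray_mutable.Sig (the include at the top of this parameter), so it relies only on signatures listed here:

  module Step (A : Owl_types_ndarray_mutable.Sig) = struct
    (* y <- exp (y + x), reusing y's storage via the ?out variants *)
    let fused_step ~y ~x =
      A.add_ ~out:y y x;
      A.exp_ ~out:y y
  end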
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_device/Make/index.html b/owl-base/Owl_computation_cpu_device/Make/index.html deleted file mode 100644 index 32d0a0b36..000000000 --- a/owl-base/Owl_computation_cpu_device/Make/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Make (owl-base.Owl_computation_cpu_device.Make)

Module Owl_computation_cpu_device.Make

Parameters

Signature

module A = A
type device = {
  device_type : Owl_types.device_type;
  initialised : bool;
}
type value =
  | ArrVal of A.arr
  | EltVal of A.elt
val make_device : unit -> device
val arr_to_value : A.arr -> value
val value_to_arr : value -> A.arr
val elt_to_value : A.elt -> value
val value_to_elt : value -> A.elt
val value_to_float : value -> float
val is_arr : value -> bool
val is_elt : value -> bool
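
A hedged sketch of the value wrappers above, written for a module M produced by applying Make (M and its argument are hypothetical):

  (* wrap an ndarray as a device value and recover it, checking the tag on the way *)
  let roundtrip (x : M.A.arr) =
    let v = M.arr_to_value x in
    assert (M.is_arr v && not (M.is_elt v));
    M.value_to_arr v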
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/.dummy b/owl-base/Owl_computation_cpu_engine/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Linalg/index.html b/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Linalg/index.html deleted file mode 100644 index b66483b4e..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Linalg/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Linalg (owl-base.Owl_computation_cpu_engine.Make.Graph.Optimiser.Operator.Linalg)

Module Operator.Linalg

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Mat/index.html b/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Mat/index.html deleted file mode 100644 index 9d7b1db50..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_cpu_engine.Make.Graph.Optimiser.Operator.Mat)

Module Operator.Mat

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Scalar/index.html b/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Scalar/index.html deleted file mode 100644 index a89b442cb..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Scalar/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -Scalar (owl-base.Owl_computation_cpu_engine.Make.Graph.Optimiser.Operator.Scalar)

Module Operator.Scalar

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html b/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html deleted file mode 100644 index b89846e59..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_cpu_engine.Make.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html b/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html deleted file mode 100644 index 10cc8ad94..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_cpu_engine.Make.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html b/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html deleted file mode 100644 index 02bc89a1c..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_cpu_engine.Make.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html b/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html deleted file mode 100644 index 74c30c7ef..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html +++ /dev/null @@ -1,381 +0,0 @@ - -A (owl-base.Owl_computation_cpu_engine.Make.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A)

Module Device.A

val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
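The trailing-underscore functions above are the in-place variants: they write their result into a caller-supplied buffer instead of allocating a fresh array. A minimal sketch of how they compose, written as a functor over Owl_types_ndarray_mutable.Sig (the module type these operations belong to, as the Parameter A page further down shows); the module and function names are illustrative only:

  module Inplace_demo (Nd : Owl_types_ndarray_mutable.Sig) = struct
    (* Compute sigmoid (2 * x) while reusing a single output buffer. *)
    let scaled_sigmoid x =
      let y = Nd.copy x in
      Nd.mul_scalar_ ~out:y y (Nd.float_to_elt 2.0);  (* y <- 2 * x, in place *)
      Nd.sigmoid_ ~out:y y;                           (* y <- sigmoid y, in place *)
      y
  end

Because every step reuses y, only one temporary is allocated regardless of how many element-wise stages are chained.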
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html b/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html deleted file mode 100644 index 87f1d4a35..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Device (owl-base.Owl_computation_cpu_engine.Make.Graph.Optimiser.Operator.Symbol.Shape.Type.Device)

Module Type.Device

module A : sig ... end
val make_device : unit -> device
val arr_to_value : A.arr -> value
val value_to_arr : value -> A.arr
val elt_to_value : A.elt -> value
val value_to_elt : value -> A.elt
val value_to_float : value -> float
val is_arr : value -> bool
val is_elt : value -> bool
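The Device signature above converts ndarray and scalar payloads into the engine's tagged value type and back. A small self-contained sketch, restating inline only the parts of the signature it uses (the names Value_probe and round_trip are hypothetical):

  module Value_probe
      (D : sig
         module A : sig type arr end
         type value
         val arr_to_value : A.arr -> value
         val value_to_arr : value -> A.arr
         val is_arr : value -> bool
       end) =
  struct
    (* Tag an ndarray as a device value, check the tag, and unwrap it again. *)
    let round_trip (x : D.A.arr) =
      let v = D.arr_to_value x in
      assert (D.is_arr v);
      D.value_to_arr v
  end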
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Symbol/Shape/Type/index.html b/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Symbol/Shape/Type/index.html deleted file mode 100644 index 0e04d08db..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Symbol/Shape/Type/index.html +++ /dev/null @@ -1,17 +0,0 @@ - -Type (owl-base.Owl_computation_cpu_engine.Make.Graph.Optimiser.Operator.Symbol.Shape.Type)

Module Shape.Type

module Device : sig ... end
and block = Make_Nested(Owl_computation_engine.Make_Graph(Owl_computation_cpu_device.Make(A))).Graph.Optimiser.Operator.Symbol.Shape.Type.block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}
and attr = Make_Nested(Owl_computation_engine.Make_Graph(Owl_computation_cpu_device.Make(A))).Graph.Optimiser.Operator.Symbol.Shape.Type.attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}
and op = Make_Nested(Owl_computation_engine.Make_Graph(Owl_computation_cpu_device.Make(A))).Graph.Optimiser.Operator.Symbol.Shape.Type.op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
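The op variant above enumerates every operation a computation node can carry; functions such as is_random_variable in the Symbol module further down dispatch on it. A hedged sketch of such a dispatch, assuming the engine is instantiated with Owl_algodiff_primal_ops.S from the full owl package (any module matching the Parameter A signature would do; the instantiation is an assumption, not part of this listing):

  module Engine = Owl_computation_cpu_engine.Make (Owl_algodiff_primal_ops.S)
  open Engine.Graph.Optimiser.Operator.Symbol.Shape.Type

  (* True for the constructors whose values are drawn from a distribution. *)
  let is_random_op : op -> bool = function
    | Uniform _ | Gaussian _ | Bernoulli _ -> true
    | _ -> false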
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Symbol/Shape/index.html b/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Symbol/Shape/index.html deleted file mode 100644 index 481a4b9a3..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Symbol/Shape/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Shape (owl-base.Owl_computation_cpu_engine.Make.Graph.Optimiser.Operator.Symbol.Shape)

Module Symbol.Shape

module Type : sig ... end
val infer_shape : Type.op -> Type.attr Owl_graph.node array -> int array option array
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Symbol/index.html b/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Symbol/index.html deleted file mode 100644 index 447a1730a..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/Symbol/index.html +++ /dev/null @@ -1,28 +0,0 @@ - -Symbol (owl-base.Owl_computation_cpu_engine.Make.Graph.Optimiser.Operator.Symbol)

Module Operator.Symbol

module Shape : sig ... end
val op_to_str : Shape.Type.op -> string
val is_random_variable : Shape.Type.op -> bool
val refnum : 'a Owl_graph.node -> int
val node_shape : Shape.Type.attr Owl_graph.node -> int array
val node_numel : Shape.Type.attr Owl_graph.node -> int
val is_shape_unknown : Shape.Type.attr Owl_graph.node -> bool
val infer_shape_graph : Shape.Type.attr Owl_graph.node array -> unit
val shape_to_str : int array option array -> string
val node_to_str : Shape.Type.attr Owl_graph.node -> string
val node_to_arr : Shape.Type.t -> Shape.Type.arr
val arr_to_node : Shape.Type.arr -> Shape.Type.t
val node_to_elt : Shape.Type.t -> Shape.Type.elt
val elt_to_node : Shape.Type.elt -> Shape.Type.t
val make_node : ?name:string -> ?value:Shape.Type.Device.value array -> ?shape:int array option array -> ?freeze:bool -> ?reuse:bool -> ?state:Shape.Type.state -> Shape.Type.op -> Shape.Type.attr Owl_graph.node
val make_then_connect : ?shape:int array option array -> Shape.Type.op -> Shape.Type.attr Owl_graph.node array -> Shape.Type.attr Owl_graph.node
val var_arr : ?shape:int array -> string -> Shape.Type.arr
val var_elt : string -> Shape.Type.elt
val const_arr : string -> Shape.Type.Device.A.arr -> Shape.Type.arr
val const_elt : string -> Shape.Type.Device.A.elt -> Shape.Type.elt
val new_block_id : unit -> int
val make_empty_block : ?block_id:int -> int -> Shape.Type.block
val make_value_block : Shape.Type.Device.value -> Shape.Type.attr Owl_graph.node -> unit
val add_node_to_block : Shape.Type.attr Owl_graph.node -> Shape.Type.block -> unit
val get_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node option
val set_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node -> unit
val get_block_id : Shape.Type.attr Owl_graph.node -> int
val set_value : Shape.Type.attr Owl_graph.node -> Shape.Type.Device.value array -> unit
val set_operator : Shape.Type.attr Owl_graph.node -> Shape.Type.op -> unit
val set_reuse : Shape.Type.attr Owl_graph.node -> bool -> unit
val get_reuse : Shape.Type.attr Owl_graph.node -> bool
val is_shared : Shape.Type.attr Owl_graph.node -> bool
val get_shared_nodes : Shape.Type.attr Owl_graph.node -> Shape.Type.attr Owl_graph.node array
val is_var : Shape.Type.attr Owl_graph.node -> bool
val is_const : Shape.Type.attr Owl_graph.node -> bool
val is_node_arr : Shape.Type.attr Owl_graph.node -> bool
val is_node_elt : Shape.Type.attr Owl_graph.node -> bool
val is_assigned : Shape.Type.attr Owl_graph.node -> bool
val check_assigned : Shape.Type.attr Owl_graph.node -> unit
val is_valid : Shape.Type.attr Owl_graph.node -> bool
val validate : Shape.Type.attr Owl_graph.node -> unit
val invalidate : Shape.Type.attr Owl_graph.node -> unit
val invalidate_graph : Shape.Type.attr Owl_graph.node -> unit
val is_freeze : Shape.Type.attr Owl_graph.node -> bool
val freeze : Shape.Type.attr Owl_graph.node -> unit
val freeze_descendants : Shape.Type.attr Owl_graph.node array -> unit
val freeze_ancestors : Shape.Type.attr Owl_graph.node array -> unit
val unsafe_assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit
val assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit
val assign_elt : Shape.Type.elt -> Shape.Type.Device.A.elt -> unit
val float_to_elt : float -> Shape.Type.elt
val elt_to_float : Shape.Type.elt -> float
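var_arr, assign_arr and the other functions above create symbolic placeholders and bind concrete ndarrays to them before a graph is evaluated. A minimal sketch, reusing the Engine module defined in the earlier sketch (the shape and names are illustrative):

  module Sym = Engine.Graph.Optimiser.Operator.Symbol

  let make_input () =
    let x = Sym.var_arr ~shape:[|2; 3|] "x" in              (* symbolic placeholder *)
    Sym.assign_arr x (Sym.Shape.Type.Device.A.ones [|2; 3|]); (* bind a concrete value *)
    x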
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/index.html b/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/index.html deleted file mode 100644 index b9361f3e6..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/Operator/index.html +++ /dev/null @@ -1,420 +0,0 @@ - -Operator (owl-base.Owl_computation_cpu_engine.Make.Graph.Optimiser.Operator)

Module Optimiser.Operator

module Symbol : sig ... end
val empty : int array -> Symbol.Shape.Type.arr
val zeros : int array -> Symbol.Shape.Type.arr
val ones : int array -> Symbol.Shape.Type.arr
val create : int array -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr
val sequential : ?a:Symbol.Shape.Type.elt -> ?step:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val uniform : ?a:Symbol.Shape.Type.elt -> ?b:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val gaussian : ?mu:Symbol.Shape.Type.elt -> ?sigma:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val bernoulli : ?p:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val init : int array -> (int -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr
val init_nd : int array -> (int array -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr
val shape : Symbol.Shape.Type.arr -> int array
val numel : Symbol.Shape.Type.arr -> int
val set : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.elt -> unit
val get_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val set_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit
val set_fancy : Owl_types.index list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit
val copy_ : out:'a -> 'b -> 'c
val reset : Symbol.Shape.Type.arr -> unit
val reshape : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val tile : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val repeat : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val pad : ?v:Symbol.Shape.Type.elt -> int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val expand : ?hi:bool -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr
val squeeze : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val concatenate : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr
val stack : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr
val split : ?axis:int -> 'a -> 'b -> 'c
val draw : ?axis:int -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr * 'a array
val lazy_print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(Symbol.Shape.Type.Device.A.elt -> string) -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val print : ?max_row:'a -> ?max_col:'b -> ?header:'c -> ?fmt:'d -> 'e -> unit
val min : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val sum : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val sum_reduce : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val elt_less_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr
val elt_greater_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr
val conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val transpose_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val transpose_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val transpose_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val dilated_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val dilated_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val dilated_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val max_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val max_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val max_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val avg_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val avg_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val avg_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val upsampling2d : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val avg_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val avg_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val avg_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val upsampling2d_backward : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val row_num : Symbol.Shape.Type.arr -> int
val col_num : Symbol.Shape.Type.arr -> int
val rows : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val copy_row_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit
val copy_col_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit
val transpose : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val to_rows : Symbol.Shape.Type.arr -> 'a array
val to_cols : Symbol.Shape.Type.arr -> 'a array
val of_array : Symbol.Shape.Type.elt array -> int array -> Symbol.Shape.Type.arr
val of_arrays : Symbol.Shape.Type.elt array array -> Symbol.Shape.Type.arr
val to_arrays : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt array array
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
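Every function in the Operator listing above mirrors an eager ndarray operation but only builds a graph node; nothing is computed until the graph is evaluated. A short sketch, reusing the Engine module from the earlier sketch (names and shapes are illustrative):

  module Op = Engine.Graph.Optimiser.Operator   (* Engine as defined above *)

  let lazy_row_sums () =
    let x = Op.Symbol.var_arr ~shape:[|8; 8|] "x" in
    let s = Op.sum ~axis:0 x in        (* builds a graph node, no computation yet *)
    Op.reshape s [|1; 8|]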
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/index.html b/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/index.html deleted file mode 100644 index 04dd205bd..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make/Graph/Optimiser/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Optimiser (owl-base.Owl_computation_cpu_engine.Make.Graph.Optimiser)

Module Graph.Optimiser

module Operator : sig ... end
val estimate_complexity : 'a Owl_graph.node array -> int * int
val optimise_nodes : Operator.Symbol.Shape.Type.attr Owl_graph.node array -> unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make/Graph/index.html b/owl-base/Owl_computation_cpu_engine/Make/Graph/index.html deleted file mode 100644 index 22f045e21..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make/Graph/index.html +++ /dev/null @@ -1,34 +0,0 @@ - -Graph (owl-base.Owl_computation_cpu_engine.Make.Graph)

Module Make.Graph

module Optimiser : sig ... end
val shape_or_value : Optimiser.Operator.Symbol.Shape.Type.t -> string
val graph_to_dot : graph -> string
val graph_to_trace : graph -> string
val save_graph : 'a -> string -> unit
val load_graph : string -> 'a * 'b
val invalidate_rvs : graph -> unit
val is_iopair_safe : 'a Owl_graph.node -> 'a Owl_graph.node -> bool
val update_iopair : graph -> unit
val remove_unused_iopair : 'a Owl_graph.node array -> 'b array -> 'a Owl_graph.node array * 'b array
val optimise : graph -> unit
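optimise rewrites a built graph in place and graph_to_dot serialises it for Graphviz. A hedged sketch; how the graph value itself is constructed is not shown in this listing, so it is taken as an argument (Engine as in the earlier sketches):

  let inspect (g : Engine.Graph.graph) =
    Engine.Graph.optimise g;                       (* simplify / fuse nodes in place *)
    print_endline (Engine.Graph.graph_to_dot g)    (* Graphviz text for visualisation *)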
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make/argument-1-A/Linalg/index.html b/owl-base/Owl_computation_cpu_engine/Make/argument-1-A/Linalg/index.html deleted file mode 100644 index 3db76c51c..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make/argument-1-A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_cpu_engine.Make.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
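The Linalg signature above provides dense solvers on top of the ndarray type. A small sketch as a functor over Owl_types_ndarray_mutable.Sig, checking a linsolve result by its residual (the module name Lsq is hypothetical):

  module Lsq (Nd : Owl_types_ndarray_mutable.Sig) = struct
    (* Solve a * x = b with the listed linsolve, then return ||a*x - b||_2. *)
    let solve_and_check a b =
      let x = Nd.Linalg.linsolve a b in
      let r = Nd.sub (Nd.dot a x) b in   (* residual, ideally close to zero *)
      (x, Nd.l2norm' r)
  end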
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make/argument-1-A/Mat/index.html b/owl-base/Owl_computation_cpu_engine/Make/argument-1-A/Mat/index.html deleted file mode 100644 index 1fe44100d..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make/argument-1-A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_cpu_engine.Make.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
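A matching sketch for the Mat helpers above (the module name Mat_demo is hypothetical):

  module Mat_demo (Nd : Owl_types_ndarray_mutable.Sig) = struct
    (* Identity plus the strictly upper-triangular part of a square matrix m. *)
    let unit_upper m =
      let n = Nd.row_num m in
      Nd.add (Nd.Mat.eye n) (Nd.Mat.triu ~k:1 m)
  end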
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make/argument-1-A/Scalar/index.html b/owl-base/Owl_computation_cpu_engine/Make/argument-1-A/Scalar/index.html deleted file mode 100644 index c0797e9df..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make/argument-1-A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_cpu_engine.Make.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
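Scalar mirrors the element-wise functions at the level of single elt values, which is what the computation graph uses for scalar nodes. A tiny sketch (softplus here is an illustrative composition, not a listed function):

  module Scalar_demo (Nd : Owl_types_ndarray_mutable.Sig) = struct
    (* softplus s = log (1 + exp s), built from the listed scalar operations. *)
    let softplus s =
      let open Nd.Scalar in
      log (add (exp s) (Nd.float_to_elt 1.0))
  end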
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make/argument-1-A/index.html b/owl-base/Owl_computation_cpu_engine/Make/argument-1-A/index.html deleted file mode 100644 index 9adc1b9ec..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make/argument-1-A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_cpu_engine.Make.A)

Parameter Make.A

include Owl_types_ndarray_mutable.Sig
include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make/index.html b/owl-base/Owl_computation_cpu_engine/Make/index.html deleted file mode 100644 index 87cf03755..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make/index.html +++ /dev/null @@ -1,500 +0,0 @@ - -Make (owl-base.Owl_computation_cpu_engine.Make)

Module Owl_computation_cpu_engine.Make

Parameters

Signature

include sig ... end
module Graph : sig ... end
val eval_graph : Graph.graph -> unit
module Optimiser = Graph.Optimiser
val shape_or_value : Optimiser.Operator.Symbol.Shape.Type.t -> string
val graph_to_dot : graph -> string
val graph_to_trace : graph -> string
val save_graph : 'a -> string -> unit
val load_graph : string -> 'a * 'b
val invalidate_rvs : graph -> unit
val is_iopair_safe : 'a Owl_graph.node -> 'a Owl_graph.node -> bool
val update_iopair : graph -> unit
val remove_unused_iopair : 'a Owl_graph.node array -> 'b array -> 'a Owl_graph.node array * 'b array
val optimise : graph -> unit
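As a rough orientation, here is a minimal, hypothetical sketch of how an engine produced by this functor is typically instantiated and driven. The functor argument Owl_algodiff_primal_ops.S and the Graph.make_graph call are assumptions drawn from common Owl usage, not part of the signatures listed on this page; everything else uses only functions shown here (var_arr, arr_to_node, assign_arr, eval_graph, graph_to_dot).

module G = Owl_computation_cpu_engine.Make (Owl_algodiff_primal_ops.S)

let () =
  (* declare a symbolic input and derive a lazy expression from it *)
  let x = G.var_arr ~shape:[| 3; 3 |] "x" in
  let y = G.sum ~axis:0 x in
  (* wrap the expression into a graph whose input is x and output is y;
     make_graph is assumed to live in the Graph sub-module above *)
  let g =
    G.Graph.make_graph
      ~input:[| G.arr_to_node x |]
      ~output:[| G.arr_to_node y |]
      "demo"
  in
  (* bind a concrete value to x, force evaluation, then dump the graph *)
  G.assign_arr x (Owl_algodiff_primal_ops.S.ones [| 3; 3 |]);
  G.eval_graph g;
  print_endline (G.graph_to_dot g)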
module Operator = Graph.Optimiser.Operator
val estimate_complexity : 'a Owl_graph.node array -> int * int
val optimise_nodes : Operator.Symbol.Shape.Type.attr Owl_graph.node array -> unit
val empty : int array -> Symbol.Shape.Type.arr
val zeros : int array -> Symbol.Shape.Type.arr
val ones : int array -> Symbol.Shape.Type.arr
val create : int array -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr
val sequential : ?a:Symbol.Shape.Type.elt -> ?step:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val uniform : ?a:Symbol.Shape.Type.elt -> ?b:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val gaussian : ?mu:Symbol.Shape.Type.elt -> ?sigma:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val bernoulli : ?p:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val init : int array -> (int -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr
val init_nd : int array -> (int array -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr
val shape : Symbol.Shape.Type.arr -> int array
val numel : Symbol.Shape.Type.arr -> int
val set : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.elt -> unit
val get_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val set_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit
val set_fancy : Owl_types.index list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit
val copy_ : out:'a -> 'b -> 'c
val reset : Symbol.Shape.Type.arr -> unit
val reshape : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val tile : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val repeat : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val pad : ?v:Symbol.Shape.Type.elt -> int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val expand : ?hi:bool -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr
val squeeze : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val concatenate : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr
val stack : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr
val split : ?axis:int -> 'a -> 'b -> 'c
val draw : ?axis:int -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr * 'a array
val lazy_print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(Symbol.Shape.Type.Device.A.elt -> string) -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val print : ?max_row:'a -> ?max_col:'b -> ?header:'c -> ?fmt:'d -> 'e -> unit
val min : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val sum : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val sum_reduce : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val elt_less_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr
val elt_greater_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr
val conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val transpose_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val transpose_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val transpose_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val dilated_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val dilated_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val dilated_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val max_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val max_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val max_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val avg_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val avg_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val avg_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val upsampling2d : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val avg_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val avg_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val avg_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val upsampling2d_backward : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val row_num : Symbol.Shape.Type.arr -> int
val col_num : Symbol.Shape.Type.arr -> int
val rows : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val copy_row_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit
val copy_col_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit
val transpose : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val to_rows : Symbol.Shape.Type.arr -> 'a array
val to_cols : Symbol.Shape.Type.arr -> 'a array
val of_array : Symbol.Shape.Type.elt array -> int array -> Symbol.Shape.Type.arr
val of_arrays : Symbol.Shape.Type.elt array array -> Symbol.Shape.Type.arr
val to_arrays : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt array array
val op_to_str : Shape.Type.op -> string
val is_random_variable : Shape.Type.op -> bool
val refnum : 'a Owl_graph.node -> int
val node_shape : Shape.Type.attr Owl_graph.node -> int array
val node_numel : Shape.Type.attr Owl_graph.node -> int
val is_shape_unknown : Shape.Type.attr Owl_graph.node -> bool
val infer_shape_graph : Shape.Type.attr Owl_graph.node array -> unit
val shape_to_str : int array option array -> string
val node_to_str : Shape.Type.attr Owl_graph.node -> string
val node_to_arr : Shape.Type.t -> Shape.Type.arr
val arr_to_node : Shape.Type.arr -> Shape.Type.t
val node_to_elt : Shape.Type.t -> Shape.Type.elt
val elt_to_node : Shape.Type.elt -> Shape.Type.t
val make_node : ?name:string -> ?value:Shape.Type.Device.value array -> ?shape:int array option array -> ?freeze:bool -> ?reuse:bool -> ?state:Shape.Type.state -> Shape.Type.op -> Shape.Type.attr Owl_graph.node
val make_then_connect : ?shape:int array option array -> Shape.Type.op -> Shape.Type.attr Owl_graph.node array -> Shape.Type.attr Owl_graph.node
val var_arr : ?shape:int array -> string -> Shape.Type.arr
val var_elt : string -> Shape.Type.elt
val const_arr : string -> Shape.Type.Device.A.arr -> Shape.Type.arr
val const_elt : string -> Shape.Type.Device.A.elt -> Shape.Type.elt
val new_block_id : unit -> int
val make_empty_block : ?block_id:int -> int -> Shape.Type.block
val make_value_block : Shape.Type.Device.value -> Shape.Type.attr Owl_graph.node -> unit
val add_node_to_block : Shape.Type.attr Owl_graph.node -> Shape.Type.block -> unit
val get_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node option
val set_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node -> unit
val get_block_id : Shape.Type.attr Owl_graph.node -> int
val set_value : Shape.Type.attr Owl_graph.node -> Shape.Type.Device.value array -> unit
val set_operator : Shape.Type.attr Owl_graph.node -> Shape.Type.op -> unit
val set_reuse : Shape.Type.attr Owl_graph.node -> bool -> unit
val get_reuse : Shape.Type.attr Owl_graph.node -> bool
val is_shared : Shape.Type.attr Owl_graph.node -> bool
val get_shared_nodes : Shape.Type.attr Owl_graph.node -> Shape.Type.attr Owl_graph.node array
val is_var : Shape.Type.attr Owl_graph.node -> bool
val is_const : Shape.Type.attr Owl_graph.node -> bool
val is_node_arr : Shape.Type.attr Owl_graph.node -> bool
val is_node_elt : Shape.Type.attr Owl_graph.node -> bool
val is_assigned : Shape.Type.attr Owl_graph.node -> bool
val check_assigned : Shape.Type.attr Owl_graph.node -> unit
val is_valid : Shape.Type.attr Owl_graph.node -> bool
val validate : Shape.Type.attr Owl_graph.node -> unit
val invalidate : Shape.Type.attr Owl_graph.node -> unit
val invalidate_graph : Shape.Type.attr Owl_graph.node -> unit
val is_freeze : Shape.Type.attr Owl_graph.node -> bool
val freeze : Shape.Type.attr Owl_graph.node -> unit
val freeze_descendants : Shape.Type.attr Owl_graph.node array -> unit
val freeze_ancestors : Shape.Type.attr Owl_graph.node array -> unit
val unsafe_assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit
val assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit
val assign_elt : Shape.Type.elt -> Shape.Type.Device.A.elt -> unit
val float_to_elt : float -> Shape.Type.elt
val elt_to_float : Shape.Type.elt -> float
val infer_shape : Type.op -> Type.attr Owl_graph.node array -> int array option array
and block = Make_Nested(Owl_computation_engine.Make_Graph(Owl_computation_cpu_device.Make(A))).Graph.Optimiser.Operator.Symbol.Shape.Type.block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}
and attr = Make_Nested(Owl_computation_engine.Make_Graph(Owl_computation_cpu_device.Make(A))).Graph.Optimiser.Operator.Symbol.Shape.Type.attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}
and op = Make_Nested(Owl_computation_engine.Make_Graph(Owl_computation_cpu_device.Make(A))).Graph.Optimiser.Operator.Symbol.Shape.Type.op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
val make_device : unit -> device
val arr_to_value : A.arr -> value
val value_to_arr : value -> A.arr
val elt_to_value : A.elt -> value
val value_to_elt : value -> A.elt
val value_to_float : value -> float
val is_arr : value -> bool
val is_elt : value -> bool
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make_Nested/CG_Eval/index.html b/owl-base/Owl_computation_cpu_engine/Make_Nested/CG_Eval/index.html deleted file mode 100644 index bcf385d32..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make_Nested/CG_Eval/index.html +++ /dev/null @@ -1,57 +0,0 @@ - -CG_Eval (owl-base.Owl_computation_cpu_engine.Make_Nested.CG_Eval)

Module Make_Nested.CG_Eval

val invalidate_opt : Graph.Optimiser.Operator.Symbol.Shape.Type.attr Owl_graph.node option -> unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make_Nested/CG_Init/MultiMap/index.html b/owl-base/Owl_computation_cpu_engine/Make_Nested/CG_Init/MultiMap/index.html deleted file mode 100644 index e0d8662d2..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make_Nested/CG_Init/MultiMap/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -MultiMap (owl-base.Owl_computation_cpu_engine.Make_Nested.CG_Init.MultiMap)

Module CG_Init.MultiMap

type key = int
val empty : 'a t
val is_empty : 'a t -> bool
val mem : key -> 'a t -> bool
val add : key -> 'a -> 'a t -> 'a t
val remove : key -> 'a t -> 'a t
val find : key -> 'a t -> 'a
val max_binding : 'a t -> key * 'a
val find_first_opt : (key -> bool) -> 'a t -> (key * 'a) option
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make_Nested/CG_Init/index.html b/owl-base/Owl_computation_cpu_engine/Make_Nested/CG_Init/index.html deleted file mode 100644 index f6da68d7e..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make_Nested/CG_Init/index.html +++ /dev/null @@ -1,13 +0,0 @@ - -CG_Init (owl-base.Owl_computation_cpu_engine.Make_Nested.CG_Init)

Module Make_Nested.CG_Init

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Linalg/index.html b/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Linalg/index.html deleted file mode 100644 index 2014dcac9..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Linalg/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Linalg (owl-base.Owl_computation_cpu_engine.Make_Nested.Graph.Optimiser.Operator.Linalg)

Module Operator.Linalg

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Mat/index.html b/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Mat/index.html deleted file mode 100644 index 06dd89537..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_cpu_engine.Make_Nested.Graph.Optimiser.Operator.Mat)

Module Operator.Mat

val eye : int -> Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Scalar/index.html b/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Scalar/index.html deleted file mode 100644 index 97d7fc2c6..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Scalar/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -Scalar (owl-base.Owl_computation_cpu_engine.Make_Nested.Graph.Optimiser.Operator.Scalar)

Module Operator.Scalar

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html b/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html deleted file mode 100644 index 8b2f7d8a6..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_cpu_engine.Make_Nested.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
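A small, hypothetical usage sketch of this Linalg sub-module. It assumes A is instantiated with a concrete double-precision ndarray module such as Owl_algodiff_primal_ops.D; the module name and the random inputs are illustrative assumptions, not part of the signatures above.

module A = Owl_algodiff_primal_ops.D  (* assumed concrete instantiation of A *)

let () =
  let a = A.uniform [| 4; 4 |] in
  let b = A.uniform [| 4; 1 |] in
  (* solve the linear system a * x = b *)
  let x = A.Linalg.linsolve a b in
  (* solve a continuous Lyapunov equation defined by a and q *)
  let q = A.uniform [| 4; 4 |] in
  let p = A.Linalg.lyapunov a q in
  ignore x;
  ignore p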
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html b/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html deleted file mode 100644 index 09e7ac209..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_cpu_engine.Make_Nested.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html b/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html deleted file mode 100644 index c891dd141..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_cpu_engine.Make_Nested.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html b/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html deleted file mode 100644 index e95b4d262..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_cpu_engine.Make_Nested.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A)

Module Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html b/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html deleted file mode 100644 index 3bb779278..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_computation_cpu_engine.Make_Nested.Graph.Optimiser.Operator.Symbol.Shape.Type.Device)

Module Type.Device

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/index.html b/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/index.html deleted file mode 100644 index 1477c88b5..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Type (owl-base.Owl_computation_cpu_engine.Make_Nested.Graph.Optimiser.Operator.Symbol.Shape.Type)

Module Shape.Type

Type definition
type state =
  1. | Valid
  2. | Invalid
    (*

    TODO

    *)

TODO

and block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}

The block type keeps a reference to a block of memory and to the nodes sharing that block.

and attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}

TODO

and arr =
  1. | Arr of t
and elt =
  1. | Elt of t
and op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
    (*

    TODO

    *)
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Symbol/Shape/index.html b/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Symbol/Shape/index.html deleted file mode 100644 index 582654a66..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Symbol/Shape/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Shape (owl-base.Owl_computation_cpu_engine.Make_Nested.Graph.Optimiser.Operator.Symbol.Shape)

Module Symbol.Shape

Core functions
val infer_shape : - Type.op -> - Type.attr Owl_graph.node array -> - int array option array

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Symbol/index.html b/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Symbol/index.html deleted file mode 100644 index fc8606bb0..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/Symbol/index.html +++ /dev/null @@ -1,28 +0,0 @@ - -Symbol (owl-base.Owl_computation_cpu_engine.Make_Nested.Graph.Optimiser.Operator.Symbol)

Module Operator.Symbol

Core functions
val op_to_str : Shape.Type.op -> string

TODO

val is_random_variable : Shape.Type.op -> bool

TODO

val refnum : 'a Owl_graph.node -> int

TODO

val node_shape : Shape.Type.attr Owl_graph.node -> int array

TODO

val node_numel : Shape.Type.attr Owl_graph.node -> int

TODO

val is_shape_unknown : Shape.Type.attr Owl_graph.node -> bool

TODO

val infer_shape_graph : Shape.Type.attr Owl_graph.node array -> unit

TODO

val shape_to_str : int array option array -> string

TODO

val node_to_str : Shape.Type.attr Owl_graph.node -> string

TODO

val node_to_arr : Shape.Type.t -> Shape.Type.arr

TODO

val arr_to_node : Shape.Type.arr -> Shape.Type.t

TODO

val node_to_elt : Shape.Type.t -> Shape.Type.elt

TODO

val elt_to_node : Shape.Type.elt -> Shape.Type.t

TODO

val make_node : - ?name:string -> - ?value:Shape.Type.Device.value array -> - ?shape:int array option array -> - ?freeze:bool -> - ?reuse:bool -> - ?state:Shape.Type.state -> - Shape.Type.op -> - Shape.Type.attr Owl_graph.node

TODO

val make_then_connect : - ?shape:int array option array -> - Shape.Type.op -> - Shape.Type.attr Owl_graph.node array -> - Shape.Type.attr Owl_graph.node

TODO

val var_arr : ?shape:int array -> string -> Shape.Type.arr

TODO

val var_elt : string -> Shape.Type.elt

TODO

val const_arr : string -> Shape.Type.Device.A.arr -> Shape.Type.arr

TODO

val const_elt : string -> Shape.Type.Device.A.elt -> Shape.Type.elt

TODO

val new_block_id : unit -> int

new_block_id () returns an unused block id.

val make_empty_block : ?block_id:int -> int -> Shape.Type.block

make_empty_block s returns an empty block of memory of size s.

val make_value_block : - Shape.Type.Device.value -> - Shape.Type.attr Owl_graph.node -> - unit

make_value_block value node creates a block of memory initialised with value and links the new block to node.

get_block node returns the memory block allocated to node. If no block is allocated, throws an exception.

val add_node_to_block : - Shape.Type.attr Owl_graph.node -> - Shape.Type.block -> - unit

Links a node to a reusable block and initialises the node's memory within the block's memory.

val get_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node option

Return the node that is currently using the memory of the block.

val set_active_node : - Shape.Type.block -> - Shape.Type.attr Owl_graph.node -> - unit

Update the node that is currently using the block of memory.

val get_block_id : Shape.Type.attr Owl_graph.node -> int

get_block_id node returns the id of the block assigned to node. If node has not been assigned yet, returns -1.
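
The block-management functions above combine naturally. The following is a minimal sketch, not part of this API: it assumes the surrounding Symbol module is open and that x is an existing Shape.Type.attr Owl_graph.node; the helper name share_block_with is hypothetical.

let share_block_with (x : Shape.Type.attr Owl_graph.node) =
  (* Allocate a fresh block big enough to hold every element of [x]. *)
  let id = new_block_id () in
  let blk = make_empty_block ~block_id:id (node_numel x) in
  (* Register [x] as a user of the block and mark it as the active node. *)
  add_node_to_block x blk;
  set_active_node blk x;
  (* [x] is now assigned, so [get_block_id x] no longer returns -1. *)
  assert (is_assigned x);
  assert (get_block_id x = id);
  get_active_node blk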

val set_value : - Shape.Type.attr Owl_graph.node -> - Shape.Type.Device.value array -> - unit

TODO

val set_operator : Shape.Type.attr Owl_graph.node -> Shape.Type.op -> unit

TODO

TODO

val set_reuse : Shape.Type.attr Owl_graph.node -> bool -> unit

TODO

val get_reuse : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_shared : Shape.Type.attr Owl_graph.node -> bool

TODO

val get_shared_nodes : - Shape.Type.attr Owl_graph.node -> - Shape.Type.attr Owl_graph.node array

get_shared_nodes node returns the nodes sharing the same block of memory as node.

val is_var : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_const : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_arr : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_elt : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_assigned : Shape.Type.attr Owl_graph.node -> bool

is_assigned node checks if a block of memory has been assigned to node.

val check_assigned : Shape.Type.attr Owl_graph.node -> unit

check_assigned node throws an exception if node has not been assigned to a block.

val is_valid : Shape.Type.attr Owl_graph.node -> bool

TODO

val validate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate_graph : Shape.Type.attr Owl_graph.node -> unit

TODO

val is_freeze : Shape.Type.attr Owl_graph.node -> bool

TODO

val freeze : Shape.Type.attr Owl_graph.node -> unit

TODO

val freeze_descendants : Shape.Type.attr Owl_graph.node array -> unit

TODO

val freeze_ancestors : Shape.Type.attr Owl_graph.node array -> unit

TODO

TODO

TODO

val unsafe_assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_elt : Shape.Type.elt -> Shape.Type.Device.A.elt -> unit

TODO

val float_to_elt : float -> Shape.Type.elt

TODO

val elt_to_float : Shape.Type.elt -> float

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/index.html b/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/index.html deleted file mode 100644 index 5d31d0d27..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/Operator/index.html +++ /dev/null @@ -1,420 +0,0 @@ - -Operator (owl-base.Owl_computation_cpu_engine.Make_Nested.Graph.Optimiser.Operator)

Module Optimiser.Operator

Vectorised functions
val empty : int array -> Symbol.Shape.Type.arr

TODO

val zeros : int array -> Symbol.Shape.Type.arr

TODO

val ones : int array -> Symbol.Shape.Type.arr

TODO

val create : int array -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val sequential : - ?a:Symbol.Shape.Type.elt -> - ?step:Symbol.Shape.Type.elt -> - int array -> - Symbol.Shape.Type.arr

TODO

val uniform : - ?a:Symbol.Shape.Type.elt -> - ?b:Symbol.Shape.Type.elt -> - int array -> - Symbol.Shape.Type.arr

TODO

val gaussian : - ?mu:Symbol.Shape.Type.elt -> - ?sigma:Symbol.Shape.Type.elt -> - int array -> - Symbol.Shape.Type.arr

TODO

val bernoulli : ?p:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val init : int array -> (int -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr

TODO

val init_nd : - int array -> - (int array -> Symbol.Shape.Type.elt) -> - Symbol.Shape.Type.arr

TODO

val shape : Symbol.Shape.Type.arr -> int array

TODO

val numel : Symbol.Shape.Type.arr -> int

TODO

TODO

val set : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.elt -> unit

TODO

val get_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val set_slice : - int list list -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - unit

TODO

TODO

val set_fancy : - Owl_types.index list -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - unit

TODO

val copy_ : out:'a -> 'b -> 'c

TODO

val reset : Symbol.Shape.Type.arr -> unit

TODO

val reshape : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val tile : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val repeat : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val pad : - ?v:Symbol.Shape.Type.elt -> - int list list -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val expand : ?hi:bool -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr

TODO

val squeeze : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val concatenate : - ?axis:int -> - Symbol.Shape.Type.arr array -> - Symbol.Shape.Type.arr

TODO

val stack : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr

TODO

TODO

val split : ?axis:int -> 'a -> 'b -> 'c

TODO

val draw : - ?axis:int -> - Symbol.Shape.Type.arr -> - int -> - Symbol.Shape.Type.arr * 'a array

TODO

TODO

delay f x returns f x. It allows using a function that is not tracked by the computation graph and delaying its evaluation. The output must have the same shape as the input.

delay_array out_shape f x works in the same way as delay but applies f to an array of ndarrays. The shape of the output must be passed as the out_shape argument.
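
As a hedged illustration (the exact signature of delay is not reproduced above; this sketch assumes it takes a function on the underlying Device.A ndarrays and a lazy array, and that the surrounding Operator module is open; x is a hypothetical Symbol.Shape.Type.arr):

let scale_outside_graph x =
  let module A = Symbol.Shape.Type.Device.A in
  (* The scaling function runs outside the computation graph; [delay] wraps it
     so that its evaluation is deferred and the input shape is preserved. *)
  delay (fun a -> A.mul_scalar a (A.float_to_elt 2.)) x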

val lazy_print : - ?max_row:int -> - ?max_col:int -> - ?header:bool -> - ?fmt:(Symbol.Shape.Type.Device.A.elt -> string) -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

lazy_print x prints the output of x when it is evaluated. It is implemented as an identity node. For information about the optional parameters, refer to the print function of the Ndarray module.
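
For example (a usage sketch only; it assumes elementwise mul and add on lazy arrays are in scope in this module, and that x and y are existing Symbol.Shape.Type.arr values):

(* [lazy_print] acts as an identity node: the product flows through unchanged
   but is printed when the graph is eventually evaluated. *)
let traced_sum x y = add (lazy_print ~header:true (mul x y)) y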

val print : ?max_row:'a -> ?max_col:'b -> ?header:'c -> ?fmt:'d -> 'e -> unit

TODO

val min : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val max : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val sum : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val sum_reduce : - ?axis:int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val log_sum_exp : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

TODO

val elt_less_equal_scalar : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.elt -> - Symbol.Shape.Type.arr

TODO

val elt_greater_equal_scalar : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.elt -> - Symbol.Shape.Type.arr

TODO

val conv1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val conv2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val conv3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val transpose_conv1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val transpose_conv2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val transpose_conv3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val dilated_conv1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val dilated_conv2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val dilated_conv3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val max_pool1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val max_pool2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val max_pool3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val avg_pool1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val avg_pool2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val avg_pool3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val upsampling2d : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv1d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val conv1d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val conv2d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val conv2d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val conv3d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val conv3d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val max_pool1d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val max_pool2d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val max_pool3d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val avg_pool1d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val avg_pool2d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val avg_pool3d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val upsampling2d_backward : - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val row_num : Symbol.Shape.Type.arr -> int

TODO

val col_num : Symbol.Shape.Type.arr -> int

TODO

val rows : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val copy_row_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

val copy_col_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

TODO

val transpose : - ?axis:int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val to_rows : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val to_cols : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val of_array : - Symbol.Shape.Type.elt array -> - int array -> - Symbol.Shape.Type.arr

TODO

val of_arrays : Symbol.Shape.Type.elt array array -> Symbol.Shape.Type.arr

TODO

val to_arrays : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt array array

TODO

Scalar functions
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/index.html b/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/index.html deleted file mode 100644 index aa8ab2ca1..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/Optimiser/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Optimiser (owl-base.Owl_computation_cpu_engine.Make_Nested.Graph.Optimiser)

Module Graph.Optimiser

Core functions
val estimate_complexity : 'a Owl_graph.node array -> int * int

TODO

val optimise_nodes : - Operator.Symbol.Shape.Type.attr Owl_graph.node array -> - unit

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/index.html b/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/index.html deleted file mode 100644 index 46703b0ff..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make_Nested/argument-1-Graph/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Graph (owl-base.Owl_computation_cpu_engine.Make_Nested.Graph)

Parameter Make_Nested.Graph

Type definition
type graph

TODO

Core functions
val shape_or_value : Optimiser.Operator.Symbol.Shape.Type.t -> string

TODO

val graph_to_dot : graph -> string

TODO

val graph_to_trace : graph -> string

TODO

val save_graph : 'a -> string -> unit

TODO

val load_graph : string -> 'a * 'b

TODO

val invalidate_rvs : graph -> unit

TODO

val is_iopair_safe : 'a Owl_graph.node -> 'a Owl_graph.node -> bool

TODO

val update_iopair : graph -> unit

TODO

val remove_unused_iopair : - 'a Owl_graph.node array -> - 'b array -> - 'a Owl_graph.node array * 'b array

TODO

val optimise : graph -> unit

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_engine/Make_Nested/index.html b/owl-base/Owl_computation_cpu_engine/Make_Nested/index.html deleted file mode 100644 index 0d1559d7b..000000000 --- a/owl-base/Owl_computation_cpu_engine/Make_Nested/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Make_Nested (owl-base.Owl_computation_cpu_engine.Make_Nested)

Module Owl_computation_cpu_engine.Make_Nested

Parameters

Signature

module Graph = Graph
module CG_Init : sig ... end
module CG_Eval : sig ... end
val eval_graph : Graph.graph -> unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_eval/.dummy b/owl-base/Owl_computation_cpu_eval/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Linalg/index.html b/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Linalg/index.html deleted file mode 100644 index f06a1aef3..000000000 --- a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Linalg/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Linalg (owl-base.Owl_computation_cpu_eval.Make.Graph.Optimiser.Operator.Linalg)

Module Operator.Linalg

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Mat/index.html b/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Mat/index.html deleted file mode 100644 index 627c3027c..000000000 --- a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_cpu_eval.Make.Graph.Optimiser.Operator.Mat)

Module Operator.Mat

val eye : int -> Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Scalar/index.html b/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Scalar/index.html deleted file mode 100644 index bc62eb997..000000000 --- a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Scalar/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -Scalar (owl-base.Owl_computation_cpu_eval.Make.Graph.Optimiser.Operator.Scalar)

Module Operator.Scalar

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html b/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html deleted file mode 100644 index f72cd4691..000000000 --- a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_cpu_eval.Make.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : - ?solver:[ `default | `bilinear | `direct ] -> - arr -> - arr -> - arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html b/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html deleted file mode 100644 index fc5c44469..000000000 --- a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_cpu_eval.Make.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html b/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html deleted file mode 100644 index 6460e95bf..000000000 --- a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_cpu_eval.Make.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html b/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html deleted file mode 100644 index 9bbed3fa3..000000000 --- a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_cpu_eval.Make.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A)

Module Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : - ?max_row:int -> - ?max_col:int -> - ?header:bool -> - ?fmt:(elt -> string) -> - arr -> - unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val dilated_conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val dilated_conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val dilated_conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val transpose_conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val transpose_conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val transpose_conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val max_pool1d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val max_pool2d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val max_pool3d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool1d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool2d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool3d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv1d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv2d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv2d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv3d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv3d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val max_pool2d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val max_pool3d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool1d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool2d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool3d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : - ?transa:bool -> - ?transb:bool -> - ?alpha:elt -> - ?beta:elt -> - c:arr -> - arr -> - arr -> - unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val conv2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val conv3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val dilated_conv1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - unit
val dilated_conv2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - unit
val dilated_conv3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - unit
val transpose_conv1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val transpose_conv2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val transpose_conv3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val max_pool1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val max_pool2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val max_pool3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val avg_pool1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val avg_pool2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val avg_pool3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv1d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv2d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv2d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv3d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv3d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val transpose_conv1d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv1d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv2d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv2d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv3d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv3d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val max_pool1d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val max_pool2d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val max_pool3d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val avg_pool1d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val avg_pool2d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val avg_pool3d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html b/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html deleted file mode 100644 index 9a1874888..000000000 --- a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_computation_cpu_eval.Make.Graph.Optimiser.Operator.Symbol.Shape.Type.Device)

Module Type.Device

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/index.html b/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/index.html deleted file mode 100644 index 1a0ad7b03..000000000 --- a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Type (owl-base.Owl_computation_cpu_eval.Make.Graph.Optimiser.Operator.Symbol.Shape.Type)

Module Shape.Type

Type definition
type state =
  1. | Valid
  2. | Invalid
    (*

    TODO

    *)

TODO

and block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}

The block type keeps a reference to a block of memory and to the nodes sharing that block.

and attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}

TODO

and arr =
  1. | Arr of t
and elt =
  1. | Elt of t
and op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option - * int option - * bool option - * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
    (*

    TODO

    *)
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/index.html b/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/index.html deleted file mode 100644 index 5531853f7..000000000 --- a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Shape (owl-base.Owl_computation_cpu_eval.Make.Graph.Optimiser.Operator.Symbol.Shape)

Module Symbol.Shape

Core functions
val infer_shape : - Type.op -> - Type.attr Owl_graph.node array -> - int array option array

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Symbol/index.html b/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Symbol/index.html deleted file mode 100644 index 3c0de9c6b..000000000 --- a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/Symbol/index.html +++ /dev/null @@ -1,28 +0,0 @@ - -Symbol (owl-base.Owl_computation_cpu_eval.Make.Graph.Optimiser.Operator.Symbol)

Module Operator.Symbol

Core functions
val op_to_str : Shape.Type.op -> string

TODO

val is_random_variable : Shape.Type.op -> bool

TODO

val refnum : 'a Owl_graph.node -> int

TODO

val node_shape : Shape.Type.attr Owl_graph.node -> int array

TODO

val node_numel : Shape.Type.attr Owl_graph.node -> int

TODO

val is_shape_unknown : Shape.Type.attr Owl_graph.node -> bool

TODO

val infer_shape_graph : Shape.Type.attr Owl_graph.node array -> unit

TODO

val shape_to_str : int array option array -> string

TODO

val node_to_str : Shape.Type.attr Owl_graph.node -> string

TODO

val node_to_arr : Shape.Type.t -> Shape.Type.arr

TODO

val arr_to_node : Shape.Type.arr -> Shape.Type.t

TODO

val node_to_elt : Shape.Type.t -> Shape.Type.elt

TODO

val elt_to_node : Shape.Type.elt -> Shape.Type.t

TODO

val make_node : - ?name:string -> - ?value:Shape.Type.Device.value array -> - ?shape:int array option array -> - ?freeze:bool -> - ?reuse:bool -> - ?state:Shape.Type.state -> - Shape.Type.op -> - Shape.Type.attr Owl_graph.node

TODO

val make_then_connect : - ?shape:int array option array -> - Shape.Type.op -> - Shape.Type.attr Owl_graph.node array -> - Shape.Type.attr Owl_graph.node

TODO

val var_arr : ?shape:int array -> string -> Shape.Type.arr

TODO

val var_elt : string -> Shape.Type.elt

TODO

val const_arr : string -> Shape.Type.Device.A.arr -> Shape.Type.arr

TODO

val const_elt : string -> Shape.Type.Device.A.elt -> Shape.Type.elt

TODO

val new_block_id : unit -> int

new_block_id () returns an unused block id.

val make_empty_block : ?block_id:int -> int -> Shape.Type.block

make_empty_block s returns an empty block of memory of size s.

val make_value_block : - Shape.Type.Device.value -> - Shape.Type.attr Owl_graph.node -> - unit

make_value_block value node creates a block of memory initialised with value and links the new block to node.

get_block node returns the memory block allocated to node. If no block is allocated, throws an exception.

val add_node_to_block : - Shape.Type.attr Owl_graph.node -> - Shape.Type.block -> - unit

Links a node to a reusable block and initialises the node's memory within the block's memory.

val get_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node option

Return the node that is currently using the memory of the block.

val set_active_node : - Shape.Type.block -> - Shape.Type.attr Owl_graph.node -> - unit

Update the node that is currently using the block of memory.

val get_block_id : Shape.Type.attr Owl_graph.node -> int

get_block_id node returns the id of the block assigned to node. If node has not been assigned yet, returns -1.

val set_value : - Shape.Type.attr Owl_graph.node -> - Shape.Type.Device.value array -> - unit

TODO

val set_operator : Shape.Type.attr Owl_graph.node -> Shape.Type.op -> unit

TODO

TODO

val set_reuse : Shape.Type.attr Owl_graph.node -> bool -> unit

TODO

val get_reuse : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_shared : Shape.Type.attr Owl_graph.node -> bool

TODO

val get_shared_nodes : - Shape.Type.attr Owl_graph.node -> - Shape.Type.attr Owl_graph.node array

get_shared_nodes node returns the nodes sharing the same block of memory as node.

val is_var : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_const : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_arr : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_elt : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_assigned : Shape.Type.attr Owl_graph.node -> bool

is_assigned node checks if a block of memory has been assigned to node.

val check_assigned : Shape.Type.attr Owl_graph.node -> unit

check_assigned node throws an exception if node has not been assigned to a block.

val is_valid : Shape.Type.attr Owl_graph.node -> bool

TODO

val validate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate_graph : Shape.Type.attr Owl_graph.node -> unit

TODO

val is_freeze : Shape.Type.attr Owl_graph.node -> bool

TODO

val freeze : Shape.Type.attr Owl_graph.node -> unit

TODO

val freeze_descendants : Shape.Type.attr Owl_graph.node array -> unit

TODO

val freeze_ancestors : Shape.Type.attr Owl_graph.node array -> unit

TODO

TODO

TODO

val unsafe_assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_elt : Shape.Type.elt -> Shape.Type.Device.A.elt -> unit

TODO

val float_to_elt : float -> Shape.Type.elt

TODO

val elt_to_float : Shape.Type.elt -> float

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/index.html b/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/index.html deleted file mode 100644 index d633e06dd..000000000 --- a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/Operator/index.html +++ /dev/null @@ -1,420 +0,0 @@ - -Operator (owl-base.Owl_computation_cpu_eval.Make.Graph.Optimiser.Operator)

Module Optimiser.Operator

Vectorised functions
val empty : int array -> Symbol.Shape.Type.arr

TODO

val zeros : int array -> Symbol.Shape.Type.arr

TODO

val ones : int array -> Symbol.Shape.Type.arr

TODO

val create : int array -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val sequential : - ?a:Symbol.Shape.Type.elt -> - ?step:Symbol.Shape.Type.elt -> - int array -> - Symbol.Shape.Type.arr

TODO

val uniform : - ?a:Symbol.Shape.Type.elt -> - ?b:Symbol.Shape.Type.elt -> - int array -> - Symbol.Shape.Type.arr

TODO

val gaussian : - ?mu:Symbol.Shape.Type.elt -> - ?sigma:Symbol.Shape.Type.elt -> - int array -> - Symbol.Shape.Type.arr

TODO

val bernoulli : ?p:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val init : int array -> (int -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr

TODO

val init_nd : - int array -> - (int array -> Symbol.Shape.Type.elt) -> - Symbol.Shape.Type.arr

TODO

val shape : Symbol.Shape.Type.arr -> int array

TODO

val numel : Symbol.Shape.Type.arr -> int

TODO

TODO

val set : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.elt -> unit

TODO

val get_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val set_slice : - int list list -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - unit

TODO

TODO

val set_fancy : - Owl_types.index list -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - unit

TODO

val copy_ : out:'a -> 'b -> 'c

TODO

val reset : Symbol.Shape.Type.arr -> unit

TODO

val reshape : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val tile : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val repeat : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val pad : - ?v:Symbol.Shape.Type.elt -> - int list list -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val expand : ?hi:bool -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr

TODO

val squeeze : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val concatenate : - ?axis:int -> - Symbol.Shape.Type.arr array -> - Symbol.Shape.Type.arr

TODO

val stack : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr

TODO

TODO

val split : ?axis:int -> 'a -> 'b -> 'c

TODO

val draw : - ?axis:int -> - Symbol.Shape.Type.arr -> - int -> - Symbol.Shape.Type.arr * 'a array

TODO

TODO

delay f x returns f x. It makes it possible to apply a function that is not tracked by the computation graph and to delay its evaluation. The output must have the same shape as the input.
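
A small illustration, assuming the functions of this Operator module are in scope, that the untracked function is passed first as the wording above suggests, and using A.abs (declared in the underlying ndarray module) as an example of a shape-preserving function:

    (* Apply an untracked Ndarray function lazily; the result keeps the input shape. *)
    let lazy_abs (x : Symbol.Shape.Type.arr) : Symbol.Shape.Type.arr =
      delay Symbol.Shape.Type.Device.A.abs x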

delay_array out_shape f x works in the same way as delay but applies f to an array of ndarrays. The shape of the output must be supplied as an argument.

val lazy_print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(Symbol.Shape.Type.Device.A.elt -> string) -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

lazy_print x prints the value of x when it is evaluated. It is implemented as an identity node. For information about the optional parameters, refer to the print function of the Ndarray module.
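
Because lazy_print is an identity node, it can be spliced into an expression without changing its value; a hedged sketch, assuming this Operator module's functions are in scope:

    (* Print the intermediate value (with a header) before reducing it. *)
    let traced_sum (x : Symbol.Shape.Type.arr) =
      x |> lazy_print ~header:true |> sum ~keep_dims:false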

val print : ?max_row:'a -> ?max_col:'b -> ?header:'c -> ?fmt:'d -> 'e -> unit

TODO

val min : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val max : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val sum : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val sum_reduce : - ?axis:int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val log_sum_exp : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

TODO

val elt_less_equal_scalar : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.elt -> - Symbol.Shape.Type.arr

TODO

val elt_greater_equal_scalar : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.elt -> - Symbol.Shape.Type.arr

TODO

val conv1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val conv2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val conv3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val transpose_conv1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val transpose_conv2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val transpose_conv3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val dilated_conv1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val dilated_conv2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val dilated_conv3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val max_pool1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val max_pool2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val max_pool3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val avg_pool1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val avg_pool2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val avg_pool3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val upsampling2d : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv1d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val conv1d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val conv2d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val conv2d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val conv3d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val conv3d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val max_pool1d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val max_pool2d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val max_pool3d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val avg_pool1d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val avg_pool2d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val avg_pool3d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val upsampling2d_backward : - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val row_num : Symbol.Shape.Type.arr -> int

TODO

val col_num : Symbol.Shape.Type.arr -> int

TODO

val rows : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val copy_row_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

val copy_col_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

TODO

val transpose : - ?axis:int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val to_rows : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val to_cols : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val of_array : - Symbol.Shape.Type.elt array -> - int array -> - Symbol.Shape.Type.arr

TODO

val of_arrays : Symbol.Shape.Type.elt array array -> Symbol.Shape.Type.arr

TODO

val to_arrays : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt array array

TODO

Scalar functions
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/index.html b/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/index.html deleted file mode 100644 index 71ed125c5..000000000 --- a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/Optimiser/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Optimiser (owl-base.Owl_computation_cpu_eval.Make.Graph.Optimiser)

Module Graph.Optimiser

Core functions
val estimate_complexity : 'a Owl_graph.node array -> int * int

TODO

val optimise_nodes : - Operator.Symbol.Shape.Type.attr Owl_graph.node array -> - unit

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/index.html b/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/index.html deleted file mode 100644 index 308d3a0db..000000000 --- a/owl-base/Owl_computation_cpu_eval/Make/argument-1-Graph/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Graph (owl-base.Owl_computation_cpu_eval.Make.Graph)

Parameter Make.Graph

Type definition
type graph

TODO

Core functions
val shape_or_value : Optimiser.Operator.Symbol.Shape.Type.t -> string

TODO

val graph_to_dot : graph -> string

TODO

val graph_to_trace : graph -> string

TODO

val save_graph : 'a -> string -> unit

TODO

val load_graph : string -> 'a * 'b

TODO

val invalidate_rvs : graph -> unit

TODO

val is_iopair_safe : 'a Owl_graph.node -> 'a Owl_graph.node -> bool

TODO

val update_iopair : graph -> unit

TODO

val remove_unused_iopair : - 'a Owl_graph.node array -> - 'b array -> - 'a Owl_graph.node array * 'b array

TODO

val optimise : graph -> unit

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_eval/Make/index.html b/owl-base/Owl_computation_cpu_eval/Make/index.html deleted file mode 100644 index 8fecfb306..000000000 --- a/owl-base/Owl_computation_cpu_eval/Make/index.html +++ /dev/null @@ -1,57 +0,0 @@ - -Make (owl-base.Owl_computation_cpu_eval.Make)

Module Owl_computation_cpu_eval.Make

Parameters

Signature

val invalidate_opt : - Graph.Optimiser.Operator.Symbol.Shape.Type.attr Owl_graph.node option -> - unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_init/.dummy b/owl-base/Owl_computation_cpu_init/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_computation_cpu_init/Make/MultiMap/index.html b/owl-base/Owl_computation_cpu_init/Make/MultiMap/index.html deleted file mode 100644 index 70d70eb79..000000000 --- a/owl-base/Owl_computation_cpu_init/Make/MultiMap/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -MultiMap (owl-base.Owl_computation_cpu_init.Make.MultiMap)

Module Make.MultiMap

type key = int
type 'a t
val empty : 'a t
val is_empty : 'a t -> bool
val mem : key -> 'a t -> bool
val add : key -> 'a -> 'a t -> 'a t
val remove : key -> 'a t -> 'a t
val find : key -> 'a t -> 'a
val max_binding : 'a t -> key * 'a
val find_first_opt : (key -> bool) -> 'a t -> (key * 'a) option
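
A hedged usage sketch of this map interface; the string payloads are purely illustrative (the initialisation module presumably stores graph nodes or blocks here):

    (* An int-keyed map: find the first binding whose key is >= 100. *)
    let _demo () =
      let m = MultiMap.(empty |> add 64 "small" |> add 256 "large") in
      match MultiMap.find_first_opt (fun k -> k >= 100) m with
      | Some (k, v) -> Printf.printf "found %s at key %d\n" v k
      | None -> print_endline "no binding at or above 100"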
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Linalg/index.html b/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Linalg/index.html deleted file mode 100644 index 683e6c2e3..000000000 --- a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Linalg/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Linalg (owl-base.Owl_computation_cpu_init.Make.Graph.Optimiser.Operator.Linalg)

Module Operator.Linalg

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Mat/index.html b/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Mat/index.html deleted file mode 100644 index 22213f4fa..000000000 --- a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_cpu_init.Make.Graph.Optimiser.Operator.Mat)

Module Operator.Mat

val eye : int -> Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Scalar/index.html b/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Scalar/index.html deleted file mode 100644 index 741443727..000000000 --- a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Scalar/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -Scalar (owl-base.Owl_computation_cpu_init.Make.Graph.Optimiser.Operator.Scalar)

Module Operator.Scalar

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html b/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html deleted file mode 100644 index d0bb6d434..000000000 --- a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_cpu_init.Make.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : - ?solver:[ `default | `bilinear | `direct ] -> - arr -> - arr -> - arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html b/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html deleted file mode 100644 index 876ab5a5d..000000000 --- a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_cpu_init.Make.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
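
These helpers compose naturally; a brief sketch, assuming the enclosing ndarray module is in scope as A:

    (* A tridiagonal 0/1 mask: keep the main diagonal plus the first super-
       and sub-diagonals of an n-by-n matrix of ones. *)
    let tridiag_mask n = A.Mat.tril ~k:1 (A.Mat.triu ~k:(-1) (A.ones [| n; n |]))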
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html b/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html deleted file mode 100644 index 3f59de1ca..000000000 --- a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_cpu_init.Make.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html b/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html deleted file mode 100644 index 9286e9f43..000000000 --- a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_cpu_init.Make.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A)

Module Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : - ?max_row:int -> - ?max_col:int -> - ?header:bool -> - ?fmt:(elt -> string) -> - arr -> - unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val dilated_conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val dilated_conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val dilated_conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val transpose_conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val transpose_conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val transpose_conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val max_pool1d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val max_pool2d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val max_pool3d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool1d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool2d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool3d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv1d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv2d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv2d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv3d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv3d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val max_pool2d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val max_pool3d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool1d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool2d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool3d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : - ?transa:bool -> - ?transb:bool -> - ?alpha:elt -> - ?beta:elt -> - c:arr -> - arr -> - arr -> - unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val conv2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val conv3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val dilated_conv1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - unit
val dilated_conv2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - unit
val dilated_conv3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - unit
val transpose_conv1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val transpose_conv2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val transpose_conv3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val max_pool1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val max_pool2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val max_pool3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val avg_pool1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val avg_pool2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val avg_pool3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv1d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv2d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv2d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv3d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv3d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val transpose_conv1d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv1d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv2d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv2d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv3d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv3d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val max_pool1d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val max_pool2d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val max_pool3d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val avg_pool1d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val avg_pool2d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val avg_pool3d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html b/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html deleted file mode 100644 index 1dd421fd7..000000000 --- a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_computation_cpu_init.Make.Graph.Optimiser.Operator.Symbol.Shape.Type.Device)

Module Type.Device

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/index.html b/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/index.html deleted file mode 100644 index c208433ed..000000000 --- a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/Type/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Type (owl-base.Owl_computation_cpu_init.Make.Graph.Optimiser.Operator.Symbol.Shape.Type)

Module Shape.Type

Type definition
type state =
  1. | Valid
  2. | Invalid
    (*

    TODO

    *)

TODO

and block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}

block type keeps a reference to a block of memory and to the nodes sharing that block.
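
A small sketch of reading these fields, where blk is assumed to be a block value obtained elsewhere (e.g. via make_empty_block):

    (* Summarise a block: id, size, number of sharing nodes, and whether it is in use. *)
    let describe (blk : block) =
      Printf.printf "block %d: %d element(s), %d node(s), active: %b\n"
        blk.block_id blk.size (List.length blk.nodes)
        (Option.is_some blk.active)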

and attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}

TODO

and arr =
  1. | Arr of t
and elt =
  1. | Elt of t
and op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
    (*

    TODO

    *)
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/index.html b/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/index.html deleted file mode 100644 index 22a2686c6..000000000 --- a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Symbol/Shape/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Shape (owl-base.Owl_computation_cpu_init.Make.Graph.Optimiser.Operator.Symbol.Shape)

Module Symbol.Shape

Core functions
val infer_shape : - Type.op -> - Type.attr Owl_graph.node array -> - int array option array

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Symbol/index.html b/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Symbol/index.html deleted file mode 100644 index 880eddf50..000000000 --- a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/Symbol/index.html +++ /dev/null @@ -1,28 +0,0 @@ - -Symbol (owl-base.Owl_computation_cpu_init.Make.Graph.Optimiser.Operator.Symbol)

Module Operator.Symbol

Core functions
val op_to_str : Shape.Type.op -> string

TODO

val is_random_variable : Shape.Type.op -> bool

TODO

val refnum : 'a Owl_graph.node -> int

TODO

val node_shape : Shape.Type.attr Owl_graph.node -> int array

TODO

val node_numel : Shape.Type.attr Owl_graph.node -> int

TODO

val is_shape_unknown : Shape.Type.attr Owl_graph.node -> bool

TODO

val infer_shape_graph : Shape.Type.attr Owl_graph.node array -> unit

TODO

val shape_to_str : int array option array -> string

TODO

val node_to_str : Shape.Type.attr Owl_graph.node -> string

TODO

val node_to_arr : Shape.Type.t -> Shape.Type.arr

TODO

val arr_to_node : Shape.Type.arr -> Shape.Type.t

TODO

val node_to_elt : Shape.Type.t -> Shape.Type.elt

TODO

val elt_to_node : Shape.Type.elt -> Shape.Type.t

TODO

val make_node : - ?name:string -> - ?value:Shape.Type.Device.value array -> - ?shape:int array option array -> - ?freeze:bool -> - ?reuse:bool -> - ?state:Shape.Type.state -> - Shape.Type.op -> - Shape.Type.attr Owl_graph.node

TODO

val make_then_connect : - ?shape:int array option array -> - Shape.Type.op -> - Shape.Type.attr Owl_graph.node array -> - Shape.Type.attr Owl_graph.node

TODO

val var_arr : ?shape:int array -> string -> Shape.Type.arr

TODO

val var_elt : string -> Shape.Type.elt

TODO

val const_arr : string -> Shape.Type.Device.A.arr -> Shape.Type.arr

TODO

val const_elt : string -> Shape.Type.Device.A.elt -> Shape.Type.elt

TODO

val new_block_id : unit -> int

new_block_id () returns an unused block id.

val make_empty_block : ?block_id:int -> int -> Shape.Type.block

make_empty_block s returns an empty block of memory of size s.

val make_value_block : Shape.Type.Device.value -> Shape.Type.attr Owl_graph.node -> unit

make_value_block value node creates a block of memory initialised with value and links the new block to node.

get_block node returns the memory block allocated to node. If no block is allocated, raises an exception.

val add_node_to_block : Shape.Type.attr Owl_graph.node -> Shape.Type.block -> unit

add_node_to_block node block links node to the reusable block block and initialises the node's memory within the block's memory.

val get_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node option

get_active_node block returns the node that is currently using the memory of block, if any.

val set_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node -> unit

set_active_node block node records node as the current user of the memory of block.

val get_block_id : Shape.Type.attr Owl_graph.node -> int

get_block_id node returns the id of the block assigned to node. If node has not been assigned yet, returns -1.

val set_value : - Shape.Type.attr Owl_graph.node -> - Shape.Type.Device.value array -> - unit

TODO

val set_operator : Shape.Type.attr Owl_graph.node -> Shape.Type.op -> unit

TODO

TODO

val set_reuse : Shape.Type.attr Owl_graph.node -> bool -> unit

TODO

val get_reuse : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_shared : Shape.Type.attr Owl_graph.node -> bool

TODO

val get_shared_nodes : Shape.Type.attr Owl_graph.node -> Shape.Type.attr Owl_graph.node array

get_shared_nodes node returns the nodes sharing the same block of memory as node.

val is_var : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_const : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_arr : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_elt : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_assigned : Shape.Type.attr Owl_graph.node -> bool

is_assigned node checks if a block of memory has been assigned to node.

val check_assigned : Shape.Type.attr Owl_graph.node -> unit

check_assigned node raises an exception if node has not been assigned to a block.

val is_valid : Shape.Type.attr Owl_graph.node -> bool

TODO

val validate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate_graph : Shape.Type.attr Owl_graph.node -> unit

TODO

val is_freeze : Shape.Type.attr Owl_graph.node -> bool

TODO

val freeze : Shape.Type.attr Owl_graph.node -> unit

TODO

val freeze_descendants : Shape.Type.attr Owl_graph.node array -> unit

TODO

val freeze_ancestors : Shape.Type.attr Owl_graph.node array -> unit

TODO

TODO

TODO

val unsafe_assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_elt : Shape.Type.elt -> Shape.Type.Device.A.elt -> unit

TODO

val float_to_elt : float -> Shape.Type.elt

TODO

val elt_to_float : Shape.Type.elt -> float

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/index.html b/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/index.html deleted file mode 100644 index 3b76c5184..000000000 --- a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/Operator/index.html +++ /dev/null @@ -1,420 +0,0 @@ - -Operator (owl-base.Owl_computation_cpu_init.Make.Graph.Optimiser.Operator)

Module Optimiser.Operator

Vectorised functions
val empty : int array -> Symbol.Shape.Type.arr

TODO

val zeros : int array -> Symbol.Shape.Type.arr

TODO

val ones : int array -> Symbol.Shape.Type.arr

TODO

val create : int array -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val sequential : - ?a:Symbol.Shape.Type.elt -> - ?step:Symbol.Shape.Type.elt -> - int array -> - Symbol.Shape.Type.arr

TODO

val uniform : - ?a:Symbol.Shape.Type.elt -> - ?b:Symbol.Shape.Type.elt -> - int array -> - Symbol.Shape.Type.arr

TODO

val gaussian : - ?mu:Symbol.Shape.Type.elt -> - ?sigma:Symbol.Shape.Type.elt -> - int array -> - Symbol.Shape.Type.arr

TODO

val bernoulli : ?p:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val init : int array -> (int -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr

TODO

val init_nd : - int array -> - (int array -> Symbol.Shape.Type.elt) -> - Symbol.Shape.Type.arr

TODO

val shape : Symbol.Shape.Type.arr -> int array

TODO

val numel : Symbol.Shape.Type.arr -> int

TODO

TODO

val set : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.elt -> unit

TODO

val get_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val set_slice : - int list list -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - unit

TODO

TODO

val set_fancy : - Owl_types.index list -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - unit

TODO

val copy_ : out:'a -> 'b -> 'c

TODO

val reset : Symbol.Shape.Type.arr -> unit

TODO

val reshape : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val tile : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val repeat : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val pad : - ?v:Symbol.Shape.Type.elt -> - int list list -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val expand : ?hi:bool -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr

TODO

val squeeze : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val concatenate : - ?axis:int -> - Symbol.Shape.Type.arr array -> - Symbol.Shape.Type.arr

TODO

val stack : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr

TODO

TODO

val split : ?axis:int -> 'a -> 'b -> 'c

TODO

val draw : - ?axis:int -> - Symbol.Shape.Type.arr -> - int -> - Symbol.Shape.Type.arr * 'a array

TODO

TODO

delay f x returns f x. It makes it possible to apply a function that is not tracked by the computation graph and to delay its evaluation. The output must have the same shape as the input.

delay_array out_shape f x works in the same way as delay but applies f to an array of ndarrays. The shape of the output must be supplied as an argument.
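
A hedged sketch, assuming the argument order out_shape, then the untracked function, then the input array, as the wording above indicates, and using the untracked A.add as the example function:

    (* Element-wise sum of two lazy ndarrays computed by an untracked function;
       the caller must supply the (shared) output shape up front. *)
    let lazy_pairwise_add out_shape (xs : Symbol.Shape.Type.arr array) =
      delay_array out_shape
        (fun a -> Symbol.Shape.Type.Device.A.add a.(0) a.(1))
        xs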

val lazy_print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(Symbol.Shape.Type.Device.A.elt -> string) -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

lazy_print x prints the value of x when it is evaluated. It is implemented as an identity node. For information about the optional parameters, refer to the print function of the Ndarray module.

val print : ?max_row:'a -> ?max_col:'b -> ?header:'c -> ?fmt:'d -> 'e -> unit

TODO

val min : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val max : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val sum : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val sum_reduce : - ?axis:int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val log_sum_exp : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

TODO

val elt_less_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val elt_greater_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val upsampling2d : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val upsampling2d_backward : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val row_num : Symbol.Shape.Type.arr -> int

TODO

val col_num : Symbol.Shape.Type.arr -> int

TODO

val rows : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val copy_row_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

val copy_col_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

TODO

val transpose : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val to_rows : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val to_cols : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val of_array : Symbol.Shape.Type.elt array -> int array -> Symbol.Shape.Type.arr

TODO

val of_arrays : Symbol.Shape.Type.elt array array -> Symbol.Shape.Type.arr

TODO

val to_arrays : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt array array

TODO

Scalar functions
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/index.html b/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/index.html deleted file mode 100644 index c36c1dc52..000000000 --- a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/Optimiser/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Optimiser (owl-base.Owl_computation_cpu_init.Make.Graph.Optimiser)

Module Graph.Optimiser

Core functions
val estimate_complexity : 'a Owl_graph.node array -> int * int

TODO

val optimise_nodes : Operator.Symbol.Shape.Type.attr Owl_graph.node array -> unit

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/index.html b/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/index.html deleted file mode 100644 index 6a5f27f59..000000000 --- a/owl-base/Owl_computation_cpu_init/Make/argument-1-Graph/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Graph (owl-base.Owl_computation_cpu_init.Make.Graph)

Parameter Make.Graph

Type definition
type graph

TODO

Core functions
val shape_or_value : Optimiser.Operator.Symbol.Shape.Type.t -> string

TODO

val graph_to_dot : graph -> string

TODO

val graph_to_trace : graph -> string

TODO

val save_graph : 'a -> string -> unit

TODO

val load_graph : string -> 'a * 'b

TODO

val invalidate_rvs : graph -> unit

TODO

val is_iopair_safe : 'a Owl_graph.node -> 'a Owl_graph.node -> bool

TODO

val update_iopair : graph -> unit

TODO

val remove_unused_iopair : 'a Owl_graph.node array -> 'b array -> 'a Owl_graph.node array * 'b array

TODO

val optimise : graph -> unit

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_cpu_init/Make/index.html b/owl-base/Owl_computation_cpu_init/Make/index.html deleted file mode 100644 index 0e3504589..000000000 --- a/owl-base/Owl_computation_cpu_init/Make/index.html +++ /dev/null @@ -1,13 +0,0 @@ - -Make (owl-base.Owl_computation_cpu_init.Make)

Module Owl_computation_cpu_init.Make

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/.dummy b/owl-base/Owl_computation_engine/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Linalg/index.html b/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Linalg/index.html deleted file mode 100644 index 2e3b771e4..000000000 --- a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Linalg/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Linalg (owl-base.Owl_computation_engine.Flatten.Engine.Graph.Optimiser.Operator.Linalg)

Module Operator.Linalg

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Mat/index.html b/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Mat/index.html deleted file mode 100644 index 191ad9b3b..000000000 --- a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_engine.Flatten.Engine.Graph.Optimiser.Operator.Mat)

Module Operator.Mat

val eye : int -> Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Scalar/index.html b/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Scalar/index.html deleted file mode 100644 index 17fe86604..000000000 --- a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Scalar/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -Scalar (owl-base.Owl_computation_engine.Flatten.Engine.Graph.Optimiser.Operator.Scalar)

Module Operator.Scalar

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html b/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html deleted file mode 100644 index fd727b97a..000000000 --- a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_engine.Flatten.Engine.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
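
These signatures mirror Owl's concrete linear-algebra module. The following sketch assumes the standard Owl top-level modules Owl.Mat and Owl.Linalg.D (an assumption, not anything defined on this page) and solves A x = b with linsolve, then checks the residual.

(* Solve a random 3x3 system and print A*x - b, which should be close to zero. *)
let () =
  let a = Owl.Mat.uniform 3 3 in
  let b = Owl.Mat.uniform 3 1 in
  let x = Owl.Linalg.D.linsolve a b in
  Owl.Mat.print Owl.Mat.(a *@ x - b)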
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html b/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html deleted file mode 100644 index 1907052e2..000000000 --- a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_engine.Flatten.Engine.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
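
The same helpers exist on Owl's concrete matrix module; a small sketch (assuming the standard Owl.Mat module, an assumption rather than anything defined here) showing what they produce:

(* eye n builds an identity matrix; triu/tril keep the upper/lower triangle
   (?k shifts the diagonal); diagm turns a vector into a diagonal matrix. *)
let () =
  let m = Owl.Mat.uniform 3 3 in
  Owl.Mat.print (Owl.Mat.eye 3);
  Owl.Mat.print (Owl.Mat.triu m);
  Owl.Mat.print (Owl.Mat.tril ~k:(-1) m);
  Owl.Mat.print (Owl.Mat.diagm (Owl.Mat.ones 1 3))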
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html b/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html deleted file mode 100644 index 5c759e55d..000000000 --- a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_engine.Flatten.Engine.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html b/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html deleted file mode 100644 index be772cdb4..000000000 --- a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_engine.Flatten.Engine.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A)

Module Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html b/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html deleted file mode 100644 index b41ed7516..000000000 --- a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_computation_engine.Flatten.Engine.Graph.Optimiser.Operator.Symbol.Shape.Type.Device)

Module Type.Device

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/index.html b/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/index.html deleted file mode 100644 index 40f37e209..000000000 --- a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Type (owl-base.Owl_computation_engine.Flatten.Engine.Graph.Optimiser.Operator.Symbol.Shape.Type)

Module Shape.Type

Type definition
type state =
  1. | Valid
  2. | Invalid
    (*

    TODO

    *)

TODO

and block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}

The block type keeps a reference to a block of memory and to the nodes sharing that block.

and attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}

TODO

and arr =
  1. | Arr of t
and elt =
  1. | Elt of t
and op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * int -> elt
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of elt -> elt
  31. | Fold of int * elt -> elt -> elt
  32. | Scan of int * elt -> elt -> elt
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of Device.A.arr -> Device.A.arr
  36. | DelayArray of int array * Device.A.arr array -> Device.A.arr
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
    (*

    TODO

    *)
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Symbol/Shape/index.html b/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Symbol/Shape/index.html deleted file mode 100644 index 45357094c..000000000 --- a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Symbol/Shape/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Shape (owl-base.Owl_computation_engine.Flatten.Engine.Graph.Optimiser.Operator.Symbol.Shape)

Module Symbol.Shape

Core functions
val infer_shape : Type.op -> Type.attr Owl_graph.node array -> int array option array

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Symbol/index.html b/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Symbol/index.html deleted file mode 100644 index d08aaaab4..000000000 --- a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/Symbol/index.html +++ /dev/null @@ -1,28 +0,0 @@ - -Symbol (owl-base.Owl_computation_engine.Flatten.Engine.Graph.Optimiser.Operator.Symbol)

Module Operator.Symbol

Core functions
val op_to_str : Shape.Type.op -> string

TODO

val is_random_variable : Shape.Type.op -> bool

TODO

val refnum : 'a Owl_graph.node -> int

TODO

val node_shape : Shape.Type.attr Owl_graph.node -> int array

TODO

val node_numel : Shape.Type.attr Owl_graph.node -> int

TODO

val is_shape_unknown : Shape.Type.attr Owl_graph.node -> bool

TODO

val infer_shape_graph : Shape.Type.attr Owl_graph.node array -> unit

TODO

val shape_to_str : int array option array -> string

TODO

val node_to_str : Shape.Type.attr Owl_graph.node -> string

TODO

val node_to_arr : Shape.Type.t -> Shape.Type.arr

TODO

val arr_to_node : Shape.Type.arr -> Shape.Type.t

TODO

val node_to_elt : Shape.Type.t -> Shape.Type.elt

TODO

val elt_to_node : Shape.Type.elt -> Shape.Type.t

TODO

val make_node : ?name:string -> ?value:Shape.Type.Device.value array -> ?shape:int array option array -> ?freeze:bool -> ?reuse:bool -> ?state:Shape.Type.state -> Shape.Type.op -> Shape.Type.attr Owl_graph.node

TODO

val make_then_connect : ?shape:int array option array -> Shape.Type.op -> Shape.Type.attr Owl_graph.node array -> Shape.Type.attr Owl_graph.node

TODO

val var_arr : ?shape:int array -> string -> Shape.Type.arr

TODO

val var_elt : string -> Shape.Type.elt

TODO

val const_arr : string -> Shape.Type.Device.A.arr -> Shape.Type.arr

TODO

val const_elt : string -> Shape.Type.Device.A.elt -> Shape.Type.elt

TODO

val new_block_id : unit -> int

new_block_id () returns an unused block id.

val make_empty_block : ?block_id:int -> int -> Shape.Type.block

make_empty_block s returns an empty block of memory of size s.

val make_value_block : Shape.Type.Device.value -> Shape.Type.attr Owl_graph.node -> unit

make_value_block value node creates a block of memory initialised with value and links the new block to node.

get_block node returns the memory block allocated to node. If no block is allocated, an exception is raised.

val add_node_to_block : Shape.Type.attr Owl_graph.node -> Shape.Type.block -> unit

Links a node to a reusable block and initialises the node's memory within the block's memory.

val get_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node option

Returns the node that is currently using the memory of the block.

val set_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node -> unit

Updates the node that is currently using the block of memory.

val get_block_id : Shape.Type.attr Owl_graph.node -> int

get_block_id node returns the id of the block assigned to node. If node has not been assigned yet, returns -1.
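
Taken together, these functions implement the memory-sharing scheme of the computation graph. The sketch below is a rough illustration only: it assumes a module S matching this Symbol interface, and the nodes n1 and n2 are placeholders rather than anything defined on this page.

(* Create one block sized for n1's output, let n1 and n2 share it, and mark
   n1 as the node currently owning the memory. *)
let share_block n1 n2 =
  let blk = S.make_empty_block (S.node_numel n1) in
  S.add_node_to_block n1 blk;
  S.add_node_to_block n2 blk;
  S.set_active_node blk n1;
  assert (S.get_block_id n1 = S.get_block_id n2)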

val set_value : Shape.Type.attr Owl_graph.node -> Shape.Type.Device.value array -> unit

TODO

val set_operator : Shape.Type.attr Owl_graph.node -> Shape.Type.op -> unit

TODO

TODO

val set_reuse : Shape.Type.attr Owl_graph.node -> bool -> unit

TODO

val get_reuse : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_shared : Shape.Type.attr Owl_graph.node -> bool

TODO

val get_shared_nodes : Shape.Type.attr Owl_graph.node -> Shape.Type.attr Owl_graph.node array

get_shared_nodes node returns the nodes sharing the same block of memory as node.

val is_var : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_const : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_arr : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_elt : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_assigned : Shape.Type.attr Owl_graph.node -> bool

is_assigned node checks if a block of memory has been assigned to node.

val check_assigned : Shape.Type.attr Owl_graph.node -> unit

check_assigned node throws an exception if node has not been assigned to a block.

val is_valid : Shape.Type.attr Owl_graph.node -> bool

TODO

val validate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate_graph : Shape.Type.attr Owl_graph.node -> unit

TODO

val is_freeze : Shape.Type.attr Owl_graph.node -> bool

TODO

val freeze : Shape.Type.attr Owl_graph.node -> unit

TODO

val freeze_descendants : Shape.Type.attr Owl_graph.node array -> unit

TODO

val freeze_ancestors : Shape.Type.attr Owl_graph.node array -> unit

TODO

TODO

TODO

val unsafe_assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_elt : Shape.Type.elt -> Shape.Type.Device.A.elt -> unit

TODO

val float_to_elt : float -> Shape.Type.elt

TODO

val elt_to_float : Shape.Type.elt -> float

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/index.html b/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/index.html deleted file mode 100644 index 9cdb4a5fe..000000000 --- a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/Operator/index.html +++ /dev/null @@ -1,420 +0,0 @@ - -Operator (owl-base.Owl_computation_engine.Flatten.Engine.Graph.Optimiser.Operator)

Module Optimiser.Operator

Vectorised functions
val empty : int array -> Symbol.Shape.Type.arr

TODO

val zeros : int array -> Symbol.Shape.Type.arr

TODO

val ones : int array -> Symbol.Shape.Type.arr

TODO

val create : int array -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val sequential : ?a:Symbol.Shape.Type.elt -> ?step:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val uniform : ?a:Symbol.Shape.Type.elt -> ?b:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val gaussian : ?mu:Symbol.Shape.Type.elt -> ?sigma:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val bernoulli : ?p:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val init : int array -> (int -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr

TODO

val init_nd : int array -> (int array -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr

TODO

val shape : Symbol.Shape.Type.arr -> int array

TODO

val numel : Symbol.Shape.Type.arr -> int

TODO

TODO

val set : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.elt -> unit

TODO

val get_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val set_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit

TODO

TODO

val set_fancy : Owl_types.index list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit

TODO

val copy_ : out:'a -> 'b -> 'c

TODO

val reset : Symbol.Shape.Type.arr -> unit

TODO

val reshape : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val tile : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val repeat : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val pad : ?v:Symbol.Shape.Type.elt -> int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val expand : ?hi:bool -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr

TODO

val squeeze : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val concatenate : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr

TODO

val stack : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr

TODO

TODO

val split : ?axis:int -> 'a -> 'b -> 'c

TODO

val draw : ?axis:int -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr * 'a array

TODO

TODO

delay f x returns f x. It makes it possible to apply a function that is not tracked by the computation graph and to delay its evaluation. The output must have the same shape as the input.

delay_array out_shape f x works in the same way as delay but is applied to an array of ndarrays. The shape of the output must be passed as an argument.
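
As an illustration, the sketch below assumes a module G matching this Operator interface and A its underlying ndarray module (neither of which is defined on this page); it sums an array of symbolic ndarrays with an untracked function, supplying the output shape explicitly because the graph cannot infer it for an arbitrary delayed function.

(* Reduce the delayed ndarrays with A.add; out_shape is the shape of the result. *)
let sum_all ~out_shape (xs : G.Symbol.Shape.Type.arr array) =
  G.delay_array out_shape
    (fun a ->
      let acc = ref a.(0) in
      for i = 1 to Array.length a - 1 do
        acc := A.add !acc a.(i)
      done;
      !acc)
    xs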

val lazy_print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(Symbol.Shape.Type.Device.A.elt -> string) -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

lazy_print x prints the output of x when it is evaluated. It is implemented as an identity node. For information about the optional parameters, refer to the print function of the Ndarray module.

val print : ?max_row:'a -> ?max_col:'b -> ?header:'c -> ?fmt:'d -> 'e -> unit

TODO

val min : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val sum : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val sum_reduce : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val log_sum_exp : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

TODO

val elt_less_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val elt_greater_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val upsampling2d : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val upsampling2d_backward : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val row_num : Symbol.Shape.Type.arr -> int

TODO

val col_num : Symbol.Shape.Type.arr -> int

TODO

val rows : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val copy_row_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

val copy_col_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

TODO

val transpose : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val to_rows : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val to_cols : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val of_array : Symbol.Shape.Type.elt array -> int array -> Symbol.Shape.Type.arr

TODO

val of_arrays : Symbol.Shape.Type.elt array array -> Symbol.Shape.Type.arr

TODO

val to_arrays : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt array array

TODO
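
of_array builds an ndarray from a flat element array plus a shape, and transpose permutes axes through ?axis, exactly as in the eager modules. A small eager example:

module N = Owl.Dense.Ndarray.D

let () =
  let a = N.of_array [| 1.; 2.; 3.; 4.; 5.; 6. |] [| 2; 3 |] in
  let b = N.transpose ~axis:[| 1; 0 |] a in   (* swap the two axes: 3x2 *)
  N.print a;
  N.print b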

Scalar functions
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/index.html b/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/index.html deleted file mode 100644 index 5fa13ff88..000000000 --- a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/Optimiser/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Optimiser (owl-base.Owl_computation_engine.Flatten.Engine.Graph.Optimiser)

Module Graph.Optimiser

Core functions
val estimate_complexity : 'a Owl_graph.node array -> int * int

TODO

val optimise_nodes : Operator.Symbol.Shape.Type.attr Owl_graph.node array -> unit

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/index.html b/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/index.html deleted file mode 100644 index e0c902731..000000000 --- a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/Graph/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Graph (owl-base.Owl_computation_engine.Flatten.Engine.Graph)

Module Engine.Graph

Type definition
type graph

TODO

Core functions
val shape_or_value : Optimiser.Operator.Symbol.Shape.Type.t -> string

TODO

val graph_to_dot : graph -> string

TODO

val graph_to_trace : graph -> string

TODO

val save_graph : 'a -> string -> unit

TODO

val load_graph : string -> 'a * 'b

TODO

val invalidate_rvs : graph -> unit

TODO

val is_iopair_safe : 'a Owl_graph.node -> 'a Owl_graph.node -> bool

TODO

val update_iopair : graph -> unit

TODO

val remove_unused_iopair : 'a Owl_graph.node array -> 'b array -> 'a Owl_graph.node array * 'b array

TODO

val optimise : graph -> unit

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/index.html b/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/index.html deleted file mode 100644 index 6f7a3e451..000000000 --- a/owl-base/Owl_computation_engine/Flatten/argument-1-Engine/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Engine (owl-base.Owl_computation_engine.Flatten.Engine)

Parameter Flatten.Engine

Core evaluation functions of the engine

TODO

TODO

val eval_graph : Graph.graph -> unit

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Flatten/index.html b/owl-base/Owl_computation_engine/Flatten/index.html deleted file mode 100644 index 7e7efa982..000000000 --- a/owl-base/Owl_computation_engine/Flatten/index.html +++ /dev/null @@ -1,485 +0,0 @@ - -Flatten (owl-base.Owl_computation_engine.Flatten)

Module Owl_computation_engine.Flatten

Parameters

Signature

include module type of struct include Engine end
module Graph = Engine.Graph
Core evaluation functions of the engine

TODO

TODO

val eval_graph : Graph.graph -> unit

TODO

include module type of struct include Graph end
module Optimiser = Graph.Optimiser
type graph = Engine.Graph.graph
val shape_or_value : Optimiser.Operator.Symbol.Shape.Type.t -> string
val graph_to_dot : graph -> string
val graph_to_trace : graph -> string
val save_graph : 'a -> string -> unit
val load_graph : string -> 'a * 'b
val invalidate_rvs : graph -> unit
val is_iopair_safe : 'a Owl_graph.node -> 'a Owl_graph.node -> bool
val update_iopair : graph -> unit
val remove_unused_iopair : 'a Owl_graph.node array -> 'b array -> 'a Owl_graph.node array * 'b array
val optimise : graph -> unit
include module type of struct include Optimiser end
module Operator = Optimiser.Operator
val estimate_complexity : 'a Owl_graph.node array -> int * int
val optimise_nodes : Operator.Symbol.Shape.Type.attr Owl_graph.node array -> unit
include module type of struct include Operator end
module Symbol = Operator.Symbol
val empty : int array -> Symbol.Shape.Type.arr
val zeros : int array -> Symbol.Shape.Type.arr
val ones : int array -> Symbol.Shape.Type.arr
val create : int array -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr
val sequential : ?a:Symbol.Shape.Type.elt -> ?step:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val uniform : ?a:Symbol.Shape.Type.elt -> ?b:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val gaussian : ?mu:Symbol.Shape.Type.elt -> ?sigma:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val bernoulli : ?p:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val init : int array -> (int -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr
val init_nd : int array -> (int array -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr
val shape : Symbol.Shape.Type.arr -> int array
val numel : Symbol.Shape.Type.arr -> int
val set : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.elt -> unit
val get_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val set_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit
val set_fancy : Owl_types.index list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit
val copy_ : out:'a -> 'b -> 'c
val reset : Symbol.Shape.Type.arr -> unit
val reshape : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val tile : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val repeat : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val pad : ?v:Symbol.Shape.Type.elt -> int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val expand : ?hi:bool -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr
val squeeze : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val concatenate : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr
val stack : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr
val split : ?axis:int -> 'a -> 'b -> 'c
val draw : ?axis:int -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr * 'a array
val lazy_print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(Symbol.Shape.Type.Device.A.elt -> string) -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val print : ?max_row:'a -> ?max_col:'b -> ?header:'c -> ?fmt:'d -> 'e -> unit
val min : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val sum : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val sum_reduce : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val elt_less_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr
val elt_greater_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr
val conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val transpose_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val transpose_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val transpose_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val dilated_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val dilated_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val dilated_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val max_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val max_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val max_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val avg_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val avg_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val avg_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val upsampling2d : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val avg_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val avg_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val avg_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val upsampling2d_backward : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val row_num : Symbol.Shape.Type.arr -> int
val col_num : Symbol.Shape.Type.arr -> int
val rows : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val copy_row_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit
val copy_col_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit
val transpose : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val to_rows : Symbol.Shape.Type.arr -> 'a array
val to_cols : Symbol.Shape.Type.arr -> 'a array
val of_array : Symbol.Shape.Type.elt array -> int array -> Symbol.Shape.Type.arr
val of_arrays : Symbol.Shape.Type.elt array array -> Symbol.Shape.Type.arr
val to_arrays : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt array array
module Scalar = Operator.Scalar
module Mat = Operator.Mat
module Linalg = Operator.Linalg
include module type of struct include Symbol end
module Shape = Symbol.Shape
val op_to_str : Shape.Type.op -> string
val is_random_variable : Shape.Type.op -> bool
val refnum : 'a Owl_graph.node -> int
val node_shape : Shape.Type.attr Owl_graph.node -> int array
val node_numel : Shape.Type.attr Owl_graph.node -> int
val is_shape_unknown : Shape.Type.attr Owl_graph.node -> bool
val infer_shape_graph : Shape.Type.attr Owl_graph.node array -> unit
val shape_to_str : int array option array -> string
val node_to_str : Shape.Type.attr Owl_graph.node -> string
val node_to_arr : Shape.Type.t -> Shape.Type.arr
val arr_to_node : Shape.Type.arr -> Shape.Type.t
val node_to_elt : Shape.Type.t -> Shape.Type.elt
val elt_to_node : Shape.Type.elt -> Shape.Type.t
val make_node : ?name:string -> ?value:Shape.Type.Device.value array -> ?shape:int array option array -> ?freeze:bool -> ?reuse:bool -> ?state:Shape.Type.state -> Shape.Type.op -> Shape.Type.attr Owl_graph.node
val make_then_connect : ?shape:int array option array -> Shape.Type.op -> Shape.Type.attr Owl_graph.node array -> Shape.Type.attr Owl_graph.node
val var_arr : ?shape:int array -> string -> Shape.Type.arr
val var_elt : string -> Shape.Type.elt
val const_arr : string -> Shape.Type.Device.A.arr -> Shape.Type.arr
val const_elt : string -> Shape.Type.Device.A.elt -> Shape.Type.elt
val new_block_id : unit -> int
val make_empty_block : ?block_id:int -> int -> Shape.Type.block
val make_value_block : Shape.Type.Device.value -> Shape.Type.attr Owl_graph.node -> unit
val add_node_to_block : Shape.Type.attr Owl_graph.node -> Shape.Type.block -> unit
val get_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node option
val set_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node -> unit
val get_block_id : Shape.Type.attr Owl_graph.node -> int
val set_value : Shape.Type.attr Owl_graph.node -> Shape.Type.Device.value array -> unit
val set_operator : Shape.Type.attr Owl_graph.node -> Shape.Type.op -> unit
val set_reuse : Shape.Type.attr Owl_graph.node -> bool -> unit
val get_reuse : Shape.Type.attr Owl_graph.node -> bool
val is_shared : Shape.Type.attr Owl_graph.node -> bool
val get_shared_nodes : Shape.Type.attr Owl_graph.node -> Shape.Type.attr Owl_graph.node array
val is_var : Shape.Type.attr Owl_graph.node -> bool
val is_const : Shape.Type.attr Owl_graph.node -> bool
val is_node_arr : Shape.Type.attr Owl_graph.node -> bool
val is_node_elt : Shape.Type.attr Owl_graph.node -> bool
val is_assigned : Shape.Type.attr Owl_graph.node -> bool
val check_assigned : Shape.Type.attr Owl_graph.node -> unit
val is_valid : Shape.Type.attr Owl_graph.node -> bool
val validate : Shape.Type.attr Owl_graph.node -> unit
val invalidate : Shape.Type.attr Owl_graph.node -> unit
val invalidate_graph : Shape.Type.attr Owl_graph.node -> unit
val is_freeze : Shape.Type.attr Owl_graph.node -> bool
val freeze : Shape.Type.attr Owl_graph.node -> unit
val freeze_descendants : Shape.Type.attr Owl_graph.node array -> unit
val freeze_ancestors : Shape.Type.attr Owl_graph.node array -> unit
val unsafe_assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit
val assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit
val assign_elt : Shape.Type.elt -> Shape.Type.Device.A.elt -> unit
val float_to_elt : float -> Shape.Type.elt
val elt_to_float : Shape.Type.elt -> float
include module type of struct include Shape end
module Type = Shape.Type
val infer_shape : Type.op -> Type.attr Owl_graph.node array -> int array option array
include module type of struct include Type end
module Device = Type.Device
and block = Engine.Graph.Optimiser.Operator.Symbol.Shape.Type.block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}
and attr = Engine.Graph.Optimiser.Operator.Symbol.Shape.Type.attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}
and op = Engine.Graph.Optimiser.Operator.Symbol.Shape.Type.op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
include module type of struct include Device end
module A = Device.A
val make_device : unit -> device
val arr_to_value : A.arr -> value
val value_to_arr : value -> A.arr
val elt_to_value : A.elt -> value
val value_to_elt : value -> A.elt
val value_to_float : value -> float
val is_arr : value -> bool
val is_elt : value -> bool
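
None of the entries above show how this flattened signature is normally driven. As a hedged illustration only, the symbolic constructors and assignment functions listed here (var_arr, const_arr, assign_arr, arr_to_node, node_to_str) combine roughly as follows; the functor instantiation follows Owl's computation-graph examples and is an assumption, not part of this interface:

module G = Owl_computation_cpu_engine.Make (Owl_algodiff_primal_ops.S)

let () =
  (* a symbolic variable and a symbolic constant *)
  let x = G.var_arr ~shape:[| 3; 3 |] "x" in
  let c = G.const_arr "c" (Owl_algodiff_primal_ops.S.ones [| 3; 3 |]) in
  (* bind a concrete value to the variable *)
  G.assign_arr x (Owl_algodiff_primal_ops.S.uniform [| 3; 3 |]);
  (* inspect the underlying graph nodes *)
  print_endline (G.node_to_str (G.arr_to_node x));
  print_endline (G.node_to_str (G.arr_to_node c))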
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Linalg/index.html b/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Linalg/index.html deleted file mode 100644 index e099d4d81..000000000 --- a/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Linalg/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Linalg (owl-base.Owl_computation_engine.Make_Graph.Optimiser.Operator.Linalg)

Module Operator.Linalg

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Mat/index.html b/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Mat/index.html deleted file mode 100644 index 04ad8c4f4..000000000 --- a/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_engine.Make_Graph.Optimiser.Operator.Mat)

Module Operator.Mat

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Scalar/index.html b/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Scalar/index.html deleted file mode 100644 index 6f9f901cd..000000000 --- a/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Scalar/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -Scalar (owl-base.Owl_computation_engine.Make_Graph.Optimiser.Operator.Scalar)

Module Operator.Scalar

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html b/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html deleted file mode 100644 index fdcb03ef3..000000000 --- a/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_engine.Make_Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
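
The eager double-precision Linalg module exposes the same functions; a quick hedged check of linsolve (solve A x = B, then measure the residual):

let () =
  let open Owl in
  let a = Mat.uniform 4 4 in
  let b = Mat.uniform 4 2 in
  let x = Linalg.D.linsolve a b in
  Printf.printf "residual = %g\n" Mat.(l2norm' (dot a x - b))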
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html b/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html deleted file mode 100644 index e74963864..000000000 --- a/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_engine.Make_Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
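
The eager matrix module carries the same helpers, for instance (hedged sketch):

let () =
  let a = Owl.Mat.uniform 3 3 in
  (* keep the upper triangle of a and add the identity *)
  Owl.Mat.(print (triu a + eye 3))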
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html b/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html deleted file mode 100644 index 42fe06c57..000000000 --- a/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_engine.Make_Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
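
These act on bare elt values; in the double-precision eager modules elt is simply float, so Owl.Maths offers the same operations (a trivial hedged example):

let () =
  Printf.printf "sigmoid 0.5 = %g, relu (-1.) = %g\n"
    (Owl.Maths.sigmoid 0.5) (Owl.Maths.relu (-1.))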
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html b/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html deleted file mode 100644 index c3caf57cd..000000000 --- a/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html +++ /dev/null @@ -1,381 +0,0 @@ - -A (owl-base.Owl_computation_engine.Make_Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A)

Module Device.A

val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
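
The trailing-underscore entries are the in-place variants: ~out (or the labelled out:) names a pre-allocated destination so no fresh array is created. A hedged eager sketch:

module N = Owl.Dense.Ndarray.D

let () =
  let a = N.uniform [| 1000; 1000 |] in
  let b = N.uniform [| 1000; 1000 |] in
  let out = N.empty [| 1000; 1000 |] in
  N.add_ ~out a b;          (* out <- a + b *)
  N.sqrt_ ~out:out out;     (* out <- sqrt out, still no allocation *)
  Printf.printf "sum = %g\n" (N.sum' out)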
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html b/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html deleted file mode 100644 index bc7b01ad9..000000000 --- a/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Device (owl-base.Owl_computation_engine.Make_Graph.Optimiser.Operator.Symbol.Shape.Type.Device)

Module Type.Device

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Symbol/Shape/Type/index.html b/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Symbol/Shape/Type/index.html deleted file mode 100644 index 832db66d2..000000000 --- a/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Symbol/Shape/Type/index.html +++ /dev/null @@ -1,17 +0,0 @@ - -Type (owl-base.Owl_computation_engine.Make_Graph.Optimiser.Operator.Symbol.Shape.Type)

Module Shape.Type

module Device : sig ... end
and attr = Owl_computation_optimiser.Make(Owl_computation_operator.Make(Owl_computation_symbol.Make(Owl_computation_shape.Make(Owl_computation_type.Make(Device))))).Operator.Symbol.Shape.Type.attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}
and op = Owl_computation_optimiser.Make(Owl_computation_operator.Make(Owl_computation_symbol.Make(Owl_computation_shape.Make(Owl_computation_type.Make(Device))))).Operator.Symbol.Shape.Type.op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
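The constructors above each record the static attributes a graph node carries: the convolution and pooling variants bundle an Owl_types_common.padding value together with int arrays that, by Owl's convention, hold kernel sizes and strides. A minimal sketch of how such a variant is typically consumed, written against a simplified stand-in type (padding, op_sketch and strides below are illustrative names, not the real Shape.Type.op):

(* Simplified stand-in for a few constructors of the op type listed above. *)
type padding = SAME | VALID

type op_sketch =
  | Sin
  | AddScalar
  | Conv2d of padding * int array                  (* padding, strides *)
  | MaxPool2d of padding * int array * int array   (* padding, kernel, strides *)

(* Recover the stride attribute of a node, when the operation has one. *)
let strides = function
  | Conv2d (_, s) -> Some s
  | MaxPool2d (_, _, s) -> Some s
  | Sin | AddScalar -> None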
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Symbol/Shape/index.html b/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Symbol/Shape/index.html deleted file mode 100644 index d8d4cefab..000000000 --- a/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Symbol/Shape/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Shape (owl-base.Owl_computation_engine.Make_Graph.Optimiser.Operator.Symbol.Shape)

Module Symbol.Shape

module Type : sig ... end
val infer_shape : Type.op -> Type.attr Owl_graph.node array -> int array option array
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Symbol/index.html b/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Symbol/index.html deleted file mode 100644 index afca835a0..000000000 --- a/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/Symbol/index.html +++ /dev/null @@ -1,28 +0,0 @@ - -Symbol (owl-base.Owl_computation_engine.Make_Graph.Optimiser.Operator.Symbol)

Module Operator.Symbol

module Shape : sig ... end
val op_to_str : Shape.Type.op -> string
val is_random_variable : Shape.Type.op -> bool
val refnum : 'a Owl_graph.node -> int
val node_shape : Shape.Type.attr Owl_graph.node -> int array
val node_numel : Shape.Type.attr Owl_graph.node -> int
val is_shape_unknown : Shape.Type.attr Owl_graph.node -> bool
val infer_shape_graph : Shape.Type.attr Owl_graph.node array -> unit
val shape_to_str : int array option array -> string
val node_to_str : Shape.Type.attr Owl_graph.node -> string
val node_to_arr : Shape.Type.t -> Shape.Type.arr
val arr_to_node : Shape.Type.arr -> Shape.Type.t
val node_to_elt : Shape.Type.t -> Shape.Type.elt
val elt_to_node : Shape.Type.elt -> Shape.Type.t
val make_node : ?name:string -> ?value:Shape.Type.Device.value array -> ?shape:int array option array -> ?freeze:bool -> ?reuse:bool -> ?state:Shape.Type.state -> Shape.Type.op -> Shape.Type.attr Owl_graph.node
val make_then_connect : ?shape:int array option array -> Shape.Type.op -> Shape.Type.attr Owl_graph.node array -> Shape.Type.attr Owl_graph.node
val var_arr : ?shape:int array -> string -> Shape.Type.arr
val var_elt : string -> Shape.Type.elt
val const_arr : string -> Shape.Type.Device.A.arr -> Shape.Type.arr
val const_elt : string -> Shape.Type.Device.A.elt -> Shape.Type.elt
val new_block_id : unit -> int
val make_empty_block : ?block_id:int -> int -> Shape.Type.block
val make_value_block : Shape.Type.Device.value -> Shape.Type.attr Owl_graph.node -> unit
val add_node_to_block : Shape.Type.attr Owl_graph.node -> Shape.Type.block -> unit
val get_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node option
val set_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node -> unit
val get_block_id : Shape.Type.attr Owl_graph.node -> int
val set_value : Shape.Type.attr Owl_graph.node -> Shape.Type.Device.value array -> unit
val set_operator : Shape.Type.attr Owl_graph.node -> Shape.Type.op -> unit
val set_reuse : Shape.Type.attr Owl_graph.node -> bool -> unit
val get_reuse : Shape.Type.attr Owl_graph.node -> bool
val is_shared : Shape.Type.attr Owl_graph.node -> bool
val get_shared_nodes : Shape.Type.attr Owl_graph.node -> Shape.Type.attr Owl_graph.node array
val is_var : Shape.Type.attr Owl_graph.node -> bool
val is_const : Shape.Type.attr Owl_graph.node -> bool
val is_node_arr : Shape.Type.attr Owl_graph.node -> bool
val is_node_elt : Shape.Type.attr Owl_graph.node -> bool
val is_assigned : Shape.Type.attr Owl_graph.node -> bool
val check_assigned : Shape.Type.attr Owl_graph.node -> unit
val is_valid : Shape.Type.attr Owl_graph.node -> bool
val validate : Shape.Type.attr Owl_graph.node -> unit
val invalidate : Shape.Type.attr Owl_graph.node -> unit
val invalidate_graph : Shape.Type.attr Owl_graph.node -> unit
val is_freeze : Shape.Type.attr Owl_graph.node -> bool
val freeze : Shape.Type.attr Owl_graph.node -> unit
val freeze_descendants : Shape.Type.attr Owl_graph.node array -> unit
val freeze_ancestors : Shape.Type.attr Owl_graph.node array -> unit
val unsafe_assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit
val assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit
val assign_elt : Shape.Type.elt -> Shape.Type.Device.A.elt -> unit
val float_to_elt : float -> Shape.Type.elt
val elt_to_float : Shape.Type.elt -> float
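A sketch of how the node-construction half of this signature is typically combined: declare a symbolic variable with var_arr, then attach a concrete value with assign_arr. The module type below is a hypothetical subset written only from the signatures above, not the real functor output; a_arr stands in for Shape.Type.Device.A.arr.

module type SYMBOL_SUBSET = sig
  type arr
  type a_arr  (* stand-in for Shape.Type.Device.A.arr *)
  val var_arr : ?shape:int array -> string -> arr
  val assign_arr : arr -> a_arr -> unit
end

module Bind_input (S : SYMBOL_SUBSET) = struct
  (* Declare a named 3x3 symbolic variable and bind a concrete array to it. *)
  let bind (value : S.a_arr) : S.arr =
    let x = S.var_arr ~shape:[| 3; 3 |] "x" in
    S.assign_arr x value;
    x
end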
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/index.html b/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/index.html deleted file mode 100644 index da568755a..000000000 --- a/owl-base/Owl_computation_engine/Make_Graph/Optimiser/Operator/index.html +++ /dev/null @@ -1,420 +0,0 @@ - -Operator (owl-base.Owl_computation_engine.Make_Graph.Optimiser.Operator)

Module Optimiser.Operator

module Symbol : sig ... end
val empty : int array -> Symbol.Shape.Type.arr
val zeros : int array -> Symbol.Shape.Type.arr
val ones : int array -> Symbol.Shape.Type.arr
val create : int array -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr
val sequential : ?a:Symbol.Shape.Type.elt -> ?step:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val uniform : ?a:Symbol.Shape.Type.elt -> ?b:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val gaussian : ?mu:Symbol.Shape.Type.elt -> ?sigma:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val bernoulli : ?p:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val init : int array -> (int -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr
val init_nd : int array -> (int array -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr
val shape : Symbol.Shape.Type.arr -> int array
val numel : Symbol.Shape.Type.arr -> int
val set : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.elt -> unit
val get_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val set_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit
val set_fancy : Owl_types.index list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit
val copy_ : out:'a -> 'b -> 'c
val reset : Symbol.Shape.Type.arr -> unit
val reshape : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val tile : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val repeat : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val pad : ?v:Symbol.Shape.Type.elt -> int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val expand : ?hi:bool -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr
val squeeze : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val concatenate : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr
val stack : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr
val split : ?axis:int -> 'a -> 'b -> 'c
val draw : ?axis:int -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr * 'a array
val lazy_print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(Symbol.Shape.Type.Device.A.elt -> string) -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val print : ?max_row:'a -> ?max_col:'b -> ?header:'c -> ?fmt:'d -> 'e -> unit
val min : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val sum : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val sum_reduce : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val elt_less_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr
val elt_greater_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr
val conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val transpose_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val transpose_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val transpose_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val dilated_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val dilated_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val dilated_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val max_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val max_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val max_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val avg_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val avg_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val avg_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val upsampling2d : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val avg_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val avg_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val avg_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val upsampling2d_backward : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val row_num : Symbol.Shape.Type.arr -> int
val col_num : Symbol.Shape.Type.arr -> int
val rows : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val copy_row_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit
val copy_col_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit
val transpose : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val to_rows : Symbol.Shape.Type.arr -> 'a array
val to_cols : Symbol.Shape.Type.arr -> 'a array
val of_array : Symbol.Shape.Type.elt array -> int array -> Symbol.Shape.Type.arr
val of_arrays : Symbol.Shape.Type.elt array array -> Symbol.Shape.Type.arr
val to_arrays : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt array array
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Make_Graph/Optimiser/index.html b/owl-base/Owl_computation_engine/Make_Graph/Optimiser/index.html deleted file mode 100644 index 56badd689..000000000 --- a/owl-base/Owl_computation_engine/Make_Graph/Optimiser/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Optimiser (owl-base.Owl_computation_engine.Make_Graph.Optimiser)

Module Make_Graph.Optimiser

module Operator : sig ... end
val estimate_complexity : 'a Owl_graph.node array -> int * int
val optimise_nodes : Operator.Symbol.Shape.Type.attr Owl_graph.node array -> unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Make_Graph/argument-1-Device/A/Linalg/index.html b/owl-base/Owl_computation_engine/Make_Graph/argument-1-Device/A/Linalg/index.html deleted file mode 100644 index 67097ebde..000000000 --- a/owl-base/Owl_computation_engine/Make_Graph/argument-1-Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_engine.Make_Graph.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
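A small usage sketch for linsolve above, written against a hypothetical one-function subset of this signature (Solve and the helper names are illustrative; the ~typ flag describes the structure of the coefficient matrix):

module Solve (L : sig
  type arr
  val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
end) = struct
  (* Solve A * X = B for a general A. *)
  let solve a b = L.linsolve a b

  (* Solve A^T * X = B without materialising the transpose. *)
  let solve_transposed a b = L.linsolve ~trans:true a b

  (* When A is lower triangular, ~typ:`l lets the backend use a triangular solve. *)
  let solve_lower a b = L.linsolve ~typ:`l a b
end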
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Make_Graph/argument-1-Device/A/Mat/index.html b/owl-base/Owl_computation_engine/Make_Graph/argument-1-Device/A/Mat/index.html deleted file mode 100644 index 99df3f15b..000000000 --- a/owl-base/Owl_computation_engine/Make_Graph/argument-1-Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_engine.Make_Graph.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Make_Graph/argument-1-Device/A/Scalar/index.html b/owl-base/Owl_computation_engine/Make_Graph/argument-1-Device/A/Scalar/index.html deleted file mode 100644 index f8d9be63e..000000000 --- a/owl-base/Owl_computation_engine/Make_Graph/argument-1-Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_engine.Make_Graph.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
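Illustration only: with the usual float instantiation (elt = float) these scalar operations reduce to ordinary Stdlib float functions. Plain-OCaml versions of the two entries that have no direct Stdlib counterpart, sigmoid and relu:

let sigmoid (x : float) = 1.0 /. (1.0 +. exp (-. x))
let relu (x : float) = if x > 0.0 then x else 0.0

let () =
  Printf.printf "sigmoid 0 = %.3f, relu (-2) = %.1f\n" (sigmoid 0.0) (relu (-2.0))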
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Make_Graph/argument-1-Device/A/index.html b/owl-base/Owl_computation_engine/Make_Graph/argument-1-Device/A/index.html deleted file mode 100644 index d54c3413a..000000000 --- a/owl-base/Owl_computation_engine/Make_Graph/argument-1-Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_engine.Make_Graph.Device.A)

Module Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
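The trailing-underscore functions above follow Owl's in-place style: they take an out buffer (required out: or optional ?out) and return unit. A sketch of how that style composes to avoid per-call allocation, written against a hypothetical three-function subset of this signature (NDARRAY_SUBSET and Fused_step are illustrative names):

module type NDARRAY_SUBSET = sig
  type arr
  val zeros : int array -> arr
  val add_ : ?out:arr -> arr -> arr -> unit
  val sigmoid_ : ?out:arr -> arr -> unit
end

module Fused_step (M : NDARRAY_SUBSET) = struct
  (* buf := sigmoid (x + b); every intermediate is written into the same
     preallocated buffer, so the step itself allocates nothing. *)
  let apply ~buf x b =
    M.add_ ~out:buf x b;
    M.sigmoid_ ~out:buf buf;
    buf

  (* A caller allocates the buffer once, e.g. with zeros, and reuses it. *)
  let make_buf shape = M.zeros shape
end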
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Make_Graph/argument-1-Device/index.html b/owl-base/Owl_computation_engine/Make_Graph/argument-1-Device/index.html deleted file mode 100644 index 79c45c852..000000000 --- a/owl-base/Owl_computation_engine/Make_Graph/argument-1-Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_computation_engine.Make_Graph.Device)

Parameter Make_Graph.Device

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine/Make_Graph/index.html b/owl-base/Owl_computation_engine/Make_Graph/index.html deleted file mode 100644 index 59d07edc4..000000000 --- a/owl-base/Owl_computation_engine/Make_Graph/index.html +++ /dev/null @@ -1,39 +0,0 @@ - -Make_Graph (owl-base.Owl_computation_engine.Make_Graph)

Module Owl_computation_engine.Make_Graph

Parameters

Signature

include sig ... end
module Optimiser : sig ... end
val shape_or_value : Optimiser.Operator.Symbol.Shape.Type.t -> string
val _block_colour : int -> string
val graph_to_dot : graph -> string
val graph_to_trace : graph -> string
val save_graph : 'a -> string -> unit
val load_graph : string -> 'a * 'b
val invalidate_rvs : graph -> unit
val is_iopair_safe : 'a Owl_graph.node -> 'a Owl_graph.node -> bool
val update_iopair : graph -> unit
val remove_unused_iopair : 'a Owl_graph.node array -> 'b array -> 'a Owl_graph.node array * 'b array
val optimise : graph -> unit
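A sketch of the flow these functions suggest: optimise the graph in place, then render it for inspection. Written against a hypothetical subset of Make_Graph (Inspect is an illustrative name; graph is left abstract):

module Inspect (G : sig
  type graph
  val optimise : graph -> unit
  val graph_to_dot : graph -> string
  val graph_to_trace : graph -> string
end) = struct
  (* Optimise the graph, then return its Graphviz and trace representations. *)
  let report g =
    G.optimise g;
    (G.graph_to_dot g, G.graph_to_trace g)
end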
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/.dummy b/owl-base/Owl_computation_engine_sig/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/A/Linalg/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/A/Linalg/index.html deleted file mode 100644 index 586c9f978..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_engine_sig.Flatten_Sig.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/A/Mat/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/A/Mat/index.html deleted file mode 100644 index 4ead59605..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_engine_sig.Flatten_Sig.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/A/Scalar/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/A/Scalar/index.html deleted file mode 100644 index 24d9f2ddf..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_engine_sig.Flatten_Sig.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/A/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/A/index.html deleted file mode 100644 index 730ceab60..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_engine_sig.Flatten_Sig.A)

Module Flatten_Sig.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
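
The trailing-underscore functions above are the in-place variants: they write their result into the array passed as out instead of allocating a new one. A minimal sketch of how they compose, as a functor over a subset of this signature (Inplace_demo and sqr_add are illustrative names, not part of the documented API):

module Inplace_demo (A : sig
  type arr
  val sqr_ : ?out:arr -> arr -> unit
  val add_ : ?out:arr -> arr -> arr -> unit
end) = struct
  (* out <- x * x + y, computed entirely in the caller-supplied buffer *)
  let sqr_add ~out x y =
    A.sqr_ ~out x;      (* out <- x * x   *)
    A.add_ ~out out y   (* out <- out + y *)
end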
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Device/A/Linalg/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Device/A/Linalg/index.html deleted file mode 100644 index 4e6664ac4..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_engine_sig.Flatten_Sig.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
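
For orientation, a hedged sketch of how these operations might combine, written as a functor over a subset of the signature (Spd_solve_demo and spd_solve are illustrative names; the meaning of ~trans and ~typ:`u is assumed to follow the usual LAPACK triangular-solve convention, which this page does not document):

module Spd_solve_demo (L : sig
  type arr
  val chol : ?upper:bool -> arr -> arr
  val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
end) = struct
  (* solve a * x = b for symmetric positive-definite a via its Cholesky
     factor a = U^T U, followed by two triangular solves *)
  let spd_solve a b =
    let u = L.chol ~upper:true a in
    let y = L.linsolve ~trans:true ~typ:`u u b in  (* U^T y = b, assumed semantics *)
    L.linsolve ~typ:`u u y                         (* U x = y *)
end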
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Device/A/Mat/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Device/A/Mat/index.html deleted file mode 100644 index cb808939b..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_engine_sig.Flatten_Sig.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
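
A small sketch combining the triangular extractors above (Band_demo and band are illustrative names, not part of the documented API; the numpy-style meaning of the k offset is assumed):

module Band_demo (M : sig
  type arr
  val triu : ?k:int -> arr -> arr
  val tril : ?k:int -> arr -> arr
end) = struct
  (* keep only the entries of x within k diagonals of the main diagonal:
     triu ~k:(-k) drops everything below the (-k)-th diagonal, then
     tril ~k drops everything above the k-th diagonal *)
  let band ~k x = M.tril ~k (M.triu ~k:(-k) x)
end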
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Device/A/Scalar/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Device/A/Scalar/index.html deleted file mode 100644 index fa181f911..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_engine_sig.Flatten_Sig.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
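
A minimal sketch composing these element-level primitives (Softplus_demo and softplus are illustrative names, not part of the documented API):

module Softplus_demo (S : sig
  type elt
  val sub : elt -> elt -> elt
  val log : elt -> elt
  val sigmoid : elt -> elt
end) = struct
  (* softplus x = log (1 + exp x), rewritten as x - log (sigmoid x)
     so that exp is never applied to a large positive argument *)
  let softplus x = S.sub x (S.log (S.sigmoid x))
end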
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Device/A/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Device/A/index.html deleted file mode 100644 index f02354a8d..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_engine_sig.Flatten_Sig.Device.A)

Module Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
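
The labelled dot_ above takes its destination as ~c together with the usual GEMM-style scaling flags. A hedged sketch of an accumulating matrix product built on it (Gemm_demo and gemm_acc are illustrative names; the BLAS-style update rule c <- alpha * op(a) * b + beta * c is assumed from the labels, not documented here):

module Gemm_demo (A : sig
  type arr
  type elt
  val dot_ :
    ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt ->
    c:arr -> arr -> arr -> unit
end) = struct
  (* accumulate alpha * transpose a * b + beta * c into the existing buffer c *)
  let gemm_acc ~alpha ~beta c a b = A.dot_ ~transa:true ~alpha ~beta ~c a b
end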
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Device/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Device/index.html deleted file mode 100644 index be8188a8c..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_computation_engine_sig.Flatten_Sig.Device)

Module Flatten_Sig.Device

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO
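
A minimal sketch of how an engine might move ndarrays in and out of the boxed value type using the conversions above (Value_demo, box and unbox are illustrative names, not part of the documented API):

module Value_demo (D : sig
  module A : sig
    type arr
  end
  type value
  val arr_to_value : A.arr -> value
  val value_to_arr : value -> A.arr
  val is_arr : value -> bool
end) = struct
  (* box an ndarray into the engine's value type, and unbox it again only
     when the value really holds an array *)
  let box x = D.arr_to_value x
  let unbox v = if D.is_arr v then Some (D.value_to_arr v) else None
end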

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Linalg/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Linalg/index.html deleted file mode 100644 index 2a6838e60..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Linalg/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Linalg (owl-base.Owl_computation_engine_sig.Flatten_Sig.Graph.Optimiser.Operator.Linalg)

Module Operator.Linalg

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Mat/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Mat/index.html deleted file mode 100644 index 863071621..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_engine_sig.Flatten_Sig.Graph.Optimiser.Operator.Mat)

Module Operator.Mat

val eye : int -> Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Scalar/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Scalar/index.html deleted file mode 100644 index c03d49685..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Scalar/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -Scalar (owl-base.Owl_computation_engine_sig.Flatten_Sig.Graph.Optimiser.Operator.Scalar)

Module Operator.Scalar

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html deleted file mode 100644 index 003a2a9d8..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_engine_sig.Flatten_Sig.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html deleted file mode 100644 index 532a58b42..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_engine_sig.Flatten_Sig.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html deleted file mode 100644 index 9b4b669db..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_engine_sig.Flatten_Sig.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html deleted file mode 100644 index a8e0dbf2f..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_engine_sig.Flatten_Sig.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A)

Module Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
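
A hedged sketch combining the axis-wise reductions above (Standardise_demo and standardise are illustrative names, not part of the documented API; that sub and div broadcast the reduced statistics back over x is an assumption):

module Standardise_demo (A : sig
  type arr
  val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
  val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
  val sub : arr -> arr -> arr
  val div : arr -> arr -> arr
end) = struct
  (* subtract the per-axis mean and divide by the per-axis standard deviation;
     keep_dims:true retains the reduced axis so the statistics line up with x *)
  let standardise ~axis x =
    let mu = A.mean ~axis ~keep_dims:true x in
    let sd = A.std ~axis ~keep_dims:true x in
    A.div (A.sub x mu) sd
end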
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html deleted file mode 100644 index f2eb5488a..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_computation_engine_sig.Flatten_Sig.Graph.Optimiser.Operator.Symbol.Shape.Type.Device)

Module Type.Device

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/index.html deleted file mode 100644 index 9fb38a67f..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Type (owl-base.Owl_computation_engine_sig.Flatten_Sig.Graph.Optimiser.Operator.Symbol.Shape.Type)

Module Shape.Type

Type definition
type state =
  1. | Valid
  2. | Invalid
    (*

    TODO

    *)

TODO

and block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}

block type keeps a reference to a block of memory and to the nodes sharing that block.

and attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}

TODO

and arr =
  1. | Arr of t
and elt =
  1. | Elt of t
and op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
    (*

    TODO

    *)
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Symbol/Shape/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Symbol/Shape/index.html deleted file mode 100644 index 194098722..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Symbol/Shape/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Shape (owl-base.Owl_computation_engine_sig.Flatten_Sig.Graph.Optimiser.Operator.Symbol.Shape)

Module Symbol.Shape

Core functions
val infer_shape : - Type.op -> - Type.attr Owl_graph.node array -> - int array option array

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Symbol/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Symbol/index.html deleted file mode 100644 index 14ae67eec..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/Symbol/index.html +++ /dev/null @@ -1,28 +0,0 @@ - -Symbol (owl-base.Owl_computation_engine_sig.Flatten_Sig.Graph.Optimiser.Operator.Symbol)

Module Operator.Symbol

Core functions
val op_to_str : Shape.Type.op -> string

TODO

val is_random_variable : Shape.Type.op -> bool

TODO

val refnum : 'a Owl_graph.node -> int

TODO

val node_shape : Shape.Type.attr Owl_graph.node -> int array

TODO

val node_numel : Shape.Type.attr Owl_graph.node -> int

TODO

val is_shape_unknown : Shape.Type.attr Owl_graph.node -> bool

TODO

val infer_shape_graph : Shape.Type.attr Owl_graph.node array -> unit

TODO

val shape_to_str : int array option array -> string

TODO

val node_to_str : Shape.Type.attr Owl_graph.node -> string

TODO

val node_to_arr : Shape.Type.t -> Shape.Type.arr

TODO

val arr_to_node : Shape.Type.arr -> Shape.Type.t

TODO

val node_to_elt : Shape.Type.t -> Shape.Type.elt

TODO

val elt_to_node : Shape.Type.elt -> Shape.Type.t

TODO

val make_node : - ?name:string -> - ?value:Shape.Type.Device.value array -> - ?shape:int array option array -> - ?freeze:bool -> - ?reuse:bool -> - ?state:Shape.Type.state -> - Shape.Type.op -> - Shape.Type.attr Owl_graph.node

TODO

val make_then_connect : - ?shape:int array option array -> - Shape.Type.op -> - Shape.Type.attr Owl_graph.node array -> - Shape.Type.attr Owl_graph.node

TODO

val var_arr : ?shape:int array -> string -> Shape.Type.arr

TODO

val var_elt : string -> Shape.Type.elt

TODO

val const_arr : string -> Shape.Type.Device.A.arr -> Shape.Type.arr

TODO

val const_elt : string -> Shape.Type.Device.A.elt -> Shape.Type.elt

TODO

val new_block_id : unit -> int

new_block_id () returns an unused block id.

val make_empty_block : ?block_id:int -> int -> Shape.Type.block

make_empty_block s returns an empty block of memory of size s.

val make_value_block : - Shape.Type.Device.value -> - Shape.Type.attr Owl_graph.node -> - unit

make_value_block value node creates a block of memory initialised with value and links the new block to node.

get_block node returns the memory block allocated to node. If no block has been allocated, it throws an exception.

val add_node_to_block : - Shape.Type.attr Owl_graph.node -> - Shape.Type.block -> - unit

Links a node to a reusable block and initialises the node's memory on the memory of the block.

val get_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node option

Return the node that is currently using the memory of the block.

val set_active_node : - Shape.Type.block -> - Shape.Type.attr Owl_graph.node -> - unit

Update the node that is currently using the block of memory.

val get_block_id : Shape.Type.attr Owl_graph.node -> int

get_block_id node returns the id of the block assigned to node. If node has not been assigned a block yet, it returns -1.
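Taken together, the block functions above describe a small memory-sharing protocol: a node is attached to a block, and the block records which node currently owns its memory. The following is a minimal sketch of that flow; it assumes a module Symbol matching this signature is in scope and that x is an existing Shape.Type.attr Owl_graph.node (both names are illustrative, not part of this page).

(* Sketch only: [Symbol] is assumed to implement this signature and [x]
   to be an existing [Shape.Type.attr Owl_graph.node]. *)
let assign_fresh_block x =
  if not (Symbol.is_assigned x) then begin
    (* Allocate a block large enough to hold all elements of [x]. *)
    let id  = Symbol.new_block_id () in
    let blk = Symbol.make_empty_block ~block_id:id (Symbol.node_numel x) in
    Symbol.add_node_to_block x blk;
    (* Mark [x] as the current user of the block's memory (this may
       already be done by [add_node_to_block]). *)
    Symbol.set_active_node blk x
  end;
  Symbol.check_assigned x;
  Printf.printf "node assigned to block %d\n" (Symbol.get_block_id x)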

val set_value : - Shape.Type.attr Owl_graph.node -> - Shape.Type.Device.value array -> - unit

TODO

val set_operator : Shape.Type.attr Owl_graph.node -> Shape.Type.op -> unit

TODO

TODO

val set_reuse : Shape.Type.attr Owl_graph.node -> bool -> unit

TODO

val get_reuse : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_shared : Shape.Type.attr Owl_graph.node -> bool

TODO

val get_shared_nodes : - Shape.Type.attr Owl_graph.node -> - Shape.Type.attr Owl_graph.node array

get_shared_nodes node returns the nodes sharing the same block of memory as node.

val is_var : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_const : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_arr : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_elt : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_assigned : Shape.Type.attr Owl_graph.node -> bool

is_assigned node checks if a block of memory has been assigned to node.

val check_assigned : Shape.Type.attr Owl_graph.node -> unit

check_assigned node throws an exception if node has not been assigned to a block.

val is_valid : Shape.Type.attr Owl_graph.node -> bool

TODO

val validate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate_graph : Shape.Type.attr Owl_graph.node -> unit

TODO

val is_freeze : Shape.Type.attr Owl_graph.node -> bool

TODO

val freeze : Shape.Type.attr Owl_graph.node -> unit

TODO

val freeze_descendants : Shape.Type.attr Owl_graph.node array -> unit

TODO

val freeze_ancestors : Shape.Type.attr Owl_graph.node array -> unit

TODO

TODO

TODO

val unsafe_assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_elt : Shape.Type.elt -> Shape.Type.Device.A.elt -> unit

TODO

val float_to_elt : float -> Shape.Type.elt

TODO

val elt_to_float : Shape.Type.elt -> float

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/index.html deleted file mode 100644 index 511beb1a3..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/Operator/index.html +++ /dev/null @@ -1,420 +0,0 @@ - -Operator (owl-base.Owl_computation_engine_sig.Flatten_Sig.Graph.Optimiser.Operator)

Module Optimiser.Operator

Vectorised functions
val empty : int array -> Symbol.Shape.Type.arr

TODO

val zeros : int array -> Symbol.Shape.Type.arr

TODO

val ones : int array -> Symbol.Shape.Type.arr

TODO

val create : int array -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val sequential : - ?a:Symbol.Shape.Type.elt -> - ?step:Symbol.Shape.Type.elt -> - int array -> - Symbol.Shape.Type.arr

TODO

val uniform : - ?a:Symbol.Shape.Type.elt -> - ?b:Symbol.Shape.Type.elt -> - int array -> - Symbol.Shape.Type.arr

TODO

val gaussian : - ?mu:Symbol.Shape.Type.elt -> - ?sigma:Symbol.Shape.Type.elt -> - int array -> - Symbol.Shape.Type.arr

TODO

val bernoulli : ?p:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val init : int array -> (int -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr

TODO

val init_nd : - int array -> - (int array -> Symbol.Shape.Type.elt) -> - Symbol.Shape.Type.arr

TODO

val shape : Symbol.Shape.Type.arr -> int array

TODO

val numel : Symbol.Shape.Type.arr -> int

TODO

TODO

val set : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.elt -> unit

TODO

val get_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val set_slice : - int list list -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - unit

TODO

TODO

val set_fancy : - Owl_types.index list -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - unit

TODO

val copy_ : out:'a -> 'b -> 'c

TODO

val reset : Symbol.Shape.Type.arr -> unit

TODO

val reshape : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val tile : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val repeat : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val pad : - ?v:Symbol.Shape.Type.elt -> - int list list -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val expand : ?hi:bool -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr

TODO

val squeeze : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val concatenate : - ?axis:int -> - Symbol.Shape.Type.arr array -> - Symbol.Shape.Type.arr

TODO

val stack : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr

TODO

TODO

val split : ?axis:int -> 'a -> 'b -> 'c

TODO

val draw : - ?axis:int -> - Symbol.Shape.Type.arr -> - int -> - Symbol.Shape.Type.arr * 'a array

TODO

TODO

delay f x returns f x. It makes it possible to use a function that is not tracked by the computation graph and to delay its evaluation. The output must have the same shape as the input.

delay_array out_shape f x works in the same way as delay but is applied to an array of ndarrays. It needs the shape of the output as an argument.
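As a usage sketch (assuming Op is a module matching this Operator signature; the alias A and the bindings x, y, z are illustrative only):

(* [A] is the underlying ndarray module of the computation graph. *)
module A = Op.Symbol.Shape.Type.Device.A

(* A symbolic 3x3 array to feed the delayed functions. *)
let x = Op.ones [| 3; 3 |]

(* Wrap an untracked ndarray function; the output must keep the input's
   shape, as documented above. *)
let y = Op.delay (fun a -> A.map (fun e -> e) a) x

(* delay_array needs the output shape explicitly because it cannot be
   inferred from the untracked function. *)
let z = Op.delay_array [| 3; 3 |] (fun arrs -> A.copy arrs.(0)) [| x; y |]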

val lazy_print : - ?max_row:int -> - ?max_col:int -> - ?header:bool -> - ?fmt:(Symbol.Shape.Type.Device.A.elt -> string) -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

lazy_print x prints the output of x when it is evaluated. It is implemented as an identity node. For information about the optional parameters, refer to the print function of the Ndarray module.
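For example, with the same assumptions as the sketch above:

(* Printing happens when the graph is evaluated, not at construction. *)
let s = Op.lazy_print ~header:true (Op.sum ~axis:0 x)
(* [s] computes the same value as [Op.sum ~axis:0 x]; the printing is a
   side effect attached to the identity node. *)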

val print : ?max_row:'a -> ?max_col:'b -> ?header:'c -> ?fmt:'d -> 'e -> unit

TODO

val min : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val max : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val sum : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val sum_reduce : - ?axis:int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val log_sum_exp : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

TODO

val elt_less_equal_scalar : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.elt -> - Symbol.Shape.Type.arr

TODO

val elt_greater_equal_scalar : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.elt -> - Symbol.Shape.Type.arr

TODO

val conv1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val conv2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val conv3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val transpose_conv1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val transpose_conv2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val transpose_conv3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val dilated_conv1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val dilated_conv2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val dilated_conv3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val max_pool1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val max_pool2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val max_pool3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val avg_pool1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val avg_pool2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val avg_pool3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val upsampling2d : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv1d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val conv1d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val conv2d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val conv2d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val conv3d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val conv3d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val max_pool1d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val max_pool2d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val max_pool3d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val avg_pool1d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val avg_pool2d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val avg_pool3d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val upsampling2d_backward : - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val row_num : Symbol.Shape.Type.arr -> int

TODO

val col_num : Symbol.Shape.Type.arr -> int

TODO

val rows : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val copy_row_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

val copy_col_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

TODO

val transpose : - ?axis:int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val to_rows : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val to_cols : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val of_array : - Symbol.Shape.Type.elt array -> - int array -> - Symbol.Shape.Type.arr

TODO

val of_arrays : Symbol.Shape.Type.elt array array -> Symbol.Shape.Type.arr

TODO

val to_arrays : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt array array

TODO

Scalar functions
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/index.html deleted file mode 100644 index 50e76900c..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/Optimiser/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Optimiser (owl-base.Owl_computation_engine_sig.Flatten_Sig.Graph.Optimiser)

Module Graph.Optimiser

Core functions
val estimate_complexity : 'a Owl_graph.node array -> int * int

TODO

val optimise_nodes : - Operator.Symbol.Shape.Type.attr Owl_graph.node array -> - unit

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/index.html deleted file mode 100644 index 2c7bcb42a..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Graph/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Graph (owl-base.Owl_computation_engine_sig.Flatten_Sig.Graph)

Module Flatten_Sig.Graph

Type definition
type graph

TODO

Core functions
val shape_or_value : Optimiser.Operator.Symbol.Shape.Type.t -> string

TODO

val graph_to_dot : graph -> string

TODO

val graph_to_trace : graph -> string

TODO

val save_graph : 'a -> string -> unit

TODO

val load_graph : string -> 'a * 'b

TODO

val invalidate_rvs : graph -> unit

TODO

val is_iopair_safe : 'a Owl_graph.node -> 'a Owl_graph.node -> bool

TODO

val update_iopair : graph -> unit

TODO

val remove_unused_iopair : - 'a Owl_graph.node array -> - 'b array -> - 'a Owl_graph.node array * 'b array

TODO

val optimise : graph -> unit

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Linalg/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Linalg/index.html deleted file mode 100644 index 5222c4915..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Linalg/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Linalg (owl-base.Owl_computation_engine_sig.Flatten_Sig.Linalg)

Module Flatten_Sig.Linalg

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Mat/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Mat/index.html deleted file mode 100644 index 7873b8f88..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_engine_sig.Flatten_Sig.Mat)

Module Flatten_Sig.Mat

val eye : int -> Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Linalg/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Linalg/index.html deleted file mode 100644 index a49b782ce..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Linalg/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Linalg (owl-base.Owl_computation_engine_sig.Flatten_Sig.Operator.Linalg)

Module Operator.Linalg

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Mat/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Mat/index.html deleted file mode 100644 index 48de2cf04..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_engine_sig.Flatten_Sig.Operator.Mat)

Module Operator.Mat

val eye : int -> Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Scalar/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Scalar/index.html deleted file mode 100644 index 411266f18..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Scalar/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -Scalar (owl-base.Owl_computation_engine_sig.Flatten_Sig.Operator.Scalar)

Module Operator.Scalar

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html deleted file mode 100644 index 0606dd140..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_engine_sig.Flatten_Sig.Operator.Symbol.Shape.Type.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : - ?solver:[ `default | `bilinear | `direct ] -> - arr -> - arr -> - arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Symbol/Shape/Type/Device/A/Mat/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Symbol/Shape/Type/Device/A/Mat/index.html deleted file mode 100644 index 87161e8e4..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Symbol/Shape/Type/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_engine_sig.Flatten_Sig.Operator.Symbol.Shape.Type.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html deleted file mode 100644 index 0b022d4aa..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_engine_sig.Flatten_Sig.Operator.Symbol.Shape.Type.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Symbol/Shape/Type/Device/A/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Symbol/Shape/Type/Device/A/index.html deleted file mode 100644 index 45639ad90..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Symbol/Shape/Type/Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_engine_sig.Flatten_Sig.Operator.Symbol.Shape.Type.Device.A)

Module Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : - ?max_row:int -> - ?max_col:int -> - ?header:bool -> - ?fmt:(elt -> string) -> - arr -> - unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val dilated_conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val dilated_conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val dilated_conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val transpose_conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val transpose_conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val transpose_conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val max_pool1d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val max_pool2d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val max_pool3d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool1d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool2d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool3d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv1d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv2d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv2d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv3d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv3d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val max_pool2d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val max_pool3d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool1d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool2d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool3d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : - ?transa:bool -> - ?transb:bool -> - ?alpha:elt -> - ?beta:elt -> - c:arr -> - arr -> - arr -> - unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val conv2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val conv3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val dilated_conv1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - unit
val dilated_conv2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - unit
val dilated_conv3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - unit
val transpose_conv1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val transpose_conv2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val transpose_conv3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val max_pool1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val max_pool2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val max_pool3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val avg_pool1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val avg_pool2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val avg_pool3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv1d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv2d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv2d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv3d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv3d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val transpose_conv1d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv1d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv2d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv2d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv3d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv3d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val max_pool1d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val max_pool2d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val max_pool3d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val avg_pool1d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val avg_pool2d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val avg_pool3d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Symbol/Shape/Type/Device/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Symbol/Shape/Type/Device/index.html deleted file mode 100644 index 6f75b920e..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Symbol/Shape/Type/Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_computation_engine_sig.Flatten_Sig.Operator.Symbol.Shape.Type.Device)

Module Type.Device

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Symbol/Shape/Type/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Symbol/Shape/Type/index.html deleted file mode 100644 index e6ebf0c18..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Symbol/Shape/Type/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Type (owl-base.Owl_computation_engine_sig.Flatten_Sig.Operator.Symbol.Shape.Type)

Module Shape.Type

Type definition
type state =
  1. | Valid
  2. | Invalid
    (*

    TODO

    *)

TODO

and block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}

The block type keeps a reference to a block of memory and to the nodes sharing that block.
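For illustration, the fields can be inspected directly (a minimal sketch, assuming T is a module matching this Type signature; the name T is hypothetical):

(* Sketch: [T] is assumed to match this Type module. *)
let describe_block (blk : T.block) =
  let open T in
  Printf.printf "block %d: size = %d, shared by %d node(s), in use = %b\n"
    blk.block_id blk.size (List.length blk.nodes) (Option.is_some blk.active)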

and attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}

TODO

and arr =
  1. | Arr of t
and elt =
  1. | Elt of t
and op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
    (*

    TODO

    *)
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Symbol/Shape/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Symbol/Shape/index.html deleted file mode 100644 index 6895163cb..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Symbol/Shape/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Shape (owl-base.Owl_computation_engine_sig.Flatten_Sig.Operator.Symbol.Shape)

Module Symbol.Shape

Core functions
val infer_shape : - Type.op -> - Type.attr Owl_graph.node array -> - int array option array

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Symbol/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Symbol/index.html deleted file mode 100644 index 5798d06e7..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/Symbol/index.html +++ /dev/null @@ -1,28 +0,0 @@ - -Symbol (owl-base.Owl_computation_engine_sig.Flatten_Sig.Operator.Symbol)

Module Operator.Symbol

Core functions
val op_to_str : Shape.Type.op -> string

TODO

val is_random_variable : Shape.Type.op -> bool

TODO

val refnum : 'a Owl_graph.node -> int

TODO

val node_shape : Shape.Type.attr Owl_graph.node -> int array

TODO

val node_numel : Shape.Type.attr Owl_graph.node -> int

TODO

val is_shape_unknown : Shape.Type.attr Owl_graph.node -> bool

TODO

val infer_shape_graph : Shape.Type.attr Owl_graph.node array -> unit

TODO

val shape_to_str : int array option array -> string

TODO

val node_to_str : Shape.Type.attr Owl_graph.node -> string

TODO

val node_to_arr : Shape.Type.t -> Shape.Type.arr

TODO

val arr_to_node : Shape.Type.arr -> Shape.Type.t

TODO

val node_to_elt : Shape.Type.t -> Shape.Type.elt

TODO

val elt_to_node : Shape.Type.elt -> Shape.Type.t

TODO

val make_node : - ?name:string -> - ?value:Shape.Type.Device.value array -> - ?shape:int array option array -> - ?freeze:bool -> - ?reuse:bool -> - ?state:Shape.Type.state -> - Shape.Type.op -> - Shape.Type.attr Owl_graph.node

TODO

val make_then_connect : - ?shape:int array option array -> - Shape.Type.op -> - Shape.Type.attr Owl_graph.node array -> - Shape.Type.attr Owl_graph.node

TODO

val var_arr : ?shape:int array -> string -> Shape.Type.arr

TODO

val var_elt : string -> Shape.Type.elt

TODO

val const_arr : string -> Shape.Type.Device.A.arr -> Shape.Type.arr

TODO

val const_elt : string -> Shape.Type.Device.A.elt -> Shape.Type.elt

TODO

val new_block_id : unit -> int

new_block_id () returns an unused block id.

val make_empty_block : ?block_id:int -> int -> Shape.Type.block

make_empty_block s returns an empty block of memory of size s.

val make_value_block : - Shape.Type.Device.value -> - Shape.Type.attr Owl_graph.node -> - unit

make_value_block value node creates a block of memory initialised with value and links the new block to node.

get_block node returns the memory block allocated to node. If no block has been allocated, it throws an exception.

val add_node_to_block : - Shape.Type.attr Owl_graph.node -> - Shape.Type.block -> - unit

Links a node to a reusable block and initialises the node's memory on the memory of the block.

val get_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node option

Return the node that is currently using the memory of the block.

val set_active_node : - Shape.Type.block -> - Shape.Type.attr Owl_graph.node -> - unit

Update the node that is currently using the block of memory.

val get_block_id : Shape.Type.attr Owl_graph.node -> int

get_block_id node returns the id of the block assigned to node. If node has not been assigned a block yet, it returns -1.

val set_value : - Shape.Type.attr Owl_graph.node -> - Shape.Type.Device.value array -> - unit

TODO

val set_operator : Shape.Type.attr Owl_graph.node -> Shape.Type.op -> unit

TODO

TODO

val set_reuse : Shape.Type.attr Owl_graph.node -> bool -> unit

TODO

val get_reuse : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_shared : Shape.Type.attr Owl_graph.node -> bool

TODO

val get_shared_nodes : - Shape.Type.attr Owl_graph.node -> - Shape.Type.attr Owl_graph.node array

get_shared_nodes node returns the nodes sharing the same block of memory as node.

val is_var : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_const : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_arr : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_elt : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_assigned : Shape.Type.attr Owl_graph.node -> bool

is_assigned node checks if a block of memory has been assigned to node.

val check_assigned : Shape.Type.attr Owl_graph.node -> unit

check_assigned node throws an exception if node has not been assigned to a block.
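
A small sketch of how the assignment queries combine, assuming the same flattened engine module G as above (Owl_computation_cpu_engine.Make (Owl_algodiff_primal_ops.S) from the owl package):

module G = Owl_computation_cpu_engine.Make (Owl_algodiff_primal_ops.S)

let () =
  let n = G.arr_to_node (G.var_arr ~shape:[| 3; 3 |] "x") in
  (* a node created by hand typically has no memory block yet *)
  Printf.printf "assigned? %b, block id: %d\n"
    (G.is_assigned n) (G.get_block_id n);
  (* check_assigned raises if the node has no block, so guard the call *)
  if G.is_assigned n then G.check_assigned n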

val is_valid : Shape.Type.attr Owl_graph.node -> bool

TODO

val validate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate_graph : Shape.Type.attr Owl_graph.node -> unit

TODO

val is_freeze : Shape.Type.attr Owl_graph.node -> bool

TODO

val freeze : Shape.Type.attr Owl_graph.node -> unit

TODO

val freeze_descendants : Shape.Type.attr Owl_graph.node array -> unit

TODO

val freeze_ancestors : Shape.Type.attr Owl_graph.node array -> unit

TODO

TODO

TODO

val unsafe_assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_elt : Shape.Type.elt -> Shape.Type.Device.A.elt -> unit

TODO

val float_to_elt : float -> Shape.Type.elt

TODO

val elt_to_float : Shape.Type.elt -> float

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/index.html deleted file mode 100644 index 1a2653ad8..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Operator/index.html +++ /dev/null @@ -1,420 +0,0 @@ - -Operator (owl-base.Owl_computation_engine_sig.Flatten_Sig.Operator)

Module Flatten_Sig.Operator

Vectorised functions
val empty : int array -> Symbol.Shape.Type.arr

TODO

val zeros : int array -> Symbol.Shape.Type.arr

TODO

val ones : int array -> Symbol.Shape.Type.arr

TODO

val create : int array -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val sequential : - ?a:Symbol.Shape.Type.elt -> - ?step:Symbol.Shape.Type.elt -> - int array -> - Symbol.Shape.Type.arr

TODO

val uniform : - ?a:Symbol.Shape.Type.elt -> - ?b:Symbol.Shape.Type.elt -> - int array -> - Symbol.Shape.Type.arr

TODO

val gaussian : - ?mu:Symbol.Shape.Type.elt -> - ?sigma:Symbol.Shape.Type.elt -> - int array -> - Symbol.Shape.Type.arr

TODO

val bernoulli : ?p:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val init : int array -> (int -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr

TODO

val init_nd : - int array -> - (int array -> Symbol.Shape.Type.elt) -> - Symbol.Shape.Type.arr

TODO

val shape : Symbol.Shape.Type.arr -> int array

TODO

val numel : Symbol.Shape.Type.arr -> int

TODO

TODO

val set : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.elt -> unit

TODO

val get_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val set_slice : - int list list -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - unit

TODO

TODO

val set_fancy : - Owl_types.index list -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - unit

TODO

val copy_ : out:'a -> 'b -> 'c

TODO

val reset : Symbol.Shape.Type.arr -> unit

TODO

val reshape : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val tile : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val repeat : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val pad : - ?v:Symbol.Shape.Type.elt -> - int list list -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val expand : ?hi:bool -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr

TODO

val squeeze : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val concatenate : - ?axis:int -> - Symbol.Shape.Type.arr array -> - Symbol.Shape.Type.arr

TODO

val stack : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr

TODO

TODO

val split : ?axis:int -> 'a -> 'b -> 'c

TODO

val draw : - ?axis:int -> - Symbol.Shape.Type.arr -> - int -> - Symbol.Shape.Type.arr * 'a array

TODO

TODO

delay f x returns f x. It allows a function that is not tracked by the computation graph to be used, delaying its evaluation. The output must have the same shape as the input.

delay_array out_shape f x works in the same way as delay but is applied to an array of lazy ndarrays. It requires the shape of the output as an argument.
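
A minimal sketch of delay, assuming a flattened engine module G = Owl_computation_cpu_engine.Make (Owl_algodiff_primal_ops.S) from the owl package and assuming the type (Device.A.arr -> Device.A.arr) -> arr -> arr, since the val line is not reproduced above. The wrapped function must be shape-preserving.

module G = Owl_computation_cpu_engine.Make (Owl_algodiff_primal_ops.S)
module N = Owl_algodiff_primal_ops.S (* the underlying ndarray module *)

let () =
  let x = G.var_arr ~shape:[| 2; 2 |] "x" in
  (* apply an untracked, shape-preserving ndarray function lazily *)
  let _y = G.delay (fun a -> N.map (fun v -> v +. 1.0) a) x in
  ()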

val lazy_print : - ?max_row:int -> - ?max_col:int -> - ?header:bool -> - ?fmt:(Symbol.Shape.Type.Device.A.elt -> string) -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

lazy_print x prints the output of x when it is evaluated. It is implemented as an identity node. For information about the optional parameters, refer to the print function of the Ndarray module.
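
A short sketch of lazy_print used as a tracing node, under the same assumptions as the previous sketch (flattened engine module G from the owl package); the printed node is used in place of x in the rest of the graph.

module G = Owl_computation_cpu_engine.Make (Owl_algodiff_primal_ops.S)

let () =
  let x = G.var_arr ~shape:[| 2; 3 |] "x" in
  (* x is printed when the graph is evaluated; y is otherwise just x *)
  let y = G.lazy_print ~header:true x in
  let _z = G.sum ~axis:0 y in
  ()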

val print : ?max_row:'a -> ?max_col:'b -> ?header:'c -> ?fmt:'d -> 'e -> unit

TODO

val min : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val max : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val sum : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val sum_reduce : - ?axis:int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val log_sum_exp : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

TODO

val elt_less_equal_scalar : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.elt -> - Symbol.Shape.Type.arr

TODO

val elt_greater_equal_scalar : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.elt -> - Symbol.Shape.Type.arr

TODO

val conv1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val conv2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val conv3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val transpose_conv1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val transpose_conv2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val transpose_conv3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val dilated_conv1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val dilated_conv2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val dilated_conv3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val max_pool1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val max_pool2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val max_pool3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val avg_pool1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val avg_pool2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val avg_pool3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val upsampling2d : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv1d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val conv1d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val conv2d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val conv2d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val conv3d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val conv3d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val max_pool1d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val max_pool2d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val max_pool3d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val avg_pool1d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val avg_pool2d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val avg_pool3d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val upsampling2d_backward : - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val row_num : Symbol.Shape.Type.arr -> int

TODO

val col_num : Symbol.Shape.Type.arr -> int

TODO

val rows : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val copy_row_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

val copy_col_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

TODO

val transpose : - ?axis:int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val to_rows : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val to_cols : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val of_array : - Symbol.Shape.Type.elt array -> - int array -> - Symbol.Shape.Type.arr

TODO

val of_arrays : Symbol.Shape.Type.elt array array -> Symbol.Shape.Type.arr

TODO

val to_arrays : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt array array

TODO

Scalar functions
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Linalg/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Linalg/index.html deleted file mode 100644 index 7bb150a82..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Linalg/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Linalg (owl-base.Owl_computation_engine_sig.Flatten_Sig.Optimiser.Operator.Linalg)

Module Operator.Linalg

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Mat/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Mat/index.html deleted file mode 100644 index 7f4dd56e0..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_engine_sig.Flatten_Sig.Optimiser.Operator.Mat)

Module Operator.Mat

val eye : int -> Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Scalar/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Scalar/index.html deleted file mode 100644 index 17d9e77a2..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Scalar/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -Scalar (owl-base.Owl_computation_engine_sig.Flatten_Sig.Optimiser.Operator.Scalar)

Module Operator.Scalar

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html deleted file mode 100644 index bfce37c0c..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_engine_sig.Flatten_Sig.Optimiser.Operator.Symbol.Shape.Type.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : - ?solver:[ `default | `bilinear | `direct ] -> - arr -> - arr -> - arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
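
A minimal sketch of the Linalg submodule in use, with a concrete instance of this signature; A = Owl_algodiff_primal_ops.D from the owl package is an assumption, but every function called below appears in the signature above.

module A = Owl_algodiff_primal_ops.D

let () =
  (* solve a x = b for a small, well-conditioned system *)
  let a = A.of_arrays [| [| 4.; 1. |]; [| 1.; 3. |] |] in
  let b = A.of_arrays [| [| 1. |]; [| 2. |] |] in
  let x = A.Linalg.linsolve a b in
  (* sanity check: dot a x should reproduce b *)
  A.print (A.dot a x)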
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html deleted file mode 100644 index 6ad560d9c..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_engine_sig.Flatten_Sig.Optimiser.Operator.Symbol.Shape.Type.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html deleted file mode 100644 index 286a06013..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_engine_sig.Flatten_Sig.Optimiser.Operator.Symbol.Shape.Type.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html deleted file mode 100644 index 767d6e342..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_engine_sig.Flatten_Sig.Optimiser.Operator.Symbol.Shape.Type.Device.A)

Module Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : - ?max_row:int -> - ?max_col:int -> - ?header:bool -> - ?fmt:(elt -> string) -> - arr -> - unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val dilated_conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val dilated_conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val dilated_conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val transpose_conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val transpose_conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val transpose_conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val max_pool1d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val max_pool2d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val max_pool3d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool1d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool2d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool3d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv1d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv2d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv2d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv3d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv3d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val max_pool2d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val max_pool3d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool1d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool2d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool3d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : - ?transa:bool -> - ?transb:bool -> - ?alpha:elt -> - ?beta:elt -> - c:arr -> - arr -> - arr -> - unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val conv2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val conv3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val dilated_conv1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - unit
val dilated_conv2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - unit
val dilated_conv3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - unit
val transpose_conv1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val transpose_conv2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val transpose_conv3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val max_pool1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val max_pool2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val max_pool3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val avg_pool1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val avg_pool2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val avg_pool3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv1d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv2d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv2d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv3d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv3d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val transpose_conv1d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv1d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv2d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv2d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv3d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv3d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val max_pool1d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val max_pool2d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val max_pool3d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val avg_pool1d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val avg_pool2d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val avg_pool3d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/index.html deleted file mode 100644 index ea8f7e883..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_computation_engine_sig.Flatten_Sig.Optimiser.Operator.Symbol.Shape.Type.Device)

Module Type.Device

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO
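
A minimal sketch of the value-wrapping functions, assuming a flattened engine module G = Owl_computation_cpu_engine.Make (Owl_algodiff_primal_ops.S) from the owl package; the path G.Device is an assumption about how the flattened engine re-exports this submodule.

module G = Owl_computation_cpu_engine.Make (Owl_algodiff_primal_ops.S)
module A = Owl_algodiff_primal_ops.S

let () =
  let raw = A.ones [| 2; 2 |] in
  (* wrap a plain ndarray as a device value and unwrap it again *)
  let v = G.Device.arr_to_value raw in
  assert (G.Device.is_arr v && not (G.Device.is_elt v));
  A.print (G.Device.value_to_arr v)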

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Symbol/Shape/Type/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Symbol/Shape/Type/index.html deleted file mode 100644 index 515c833be..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Symbol/Shape/Type/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Type (owl-base.Owl_computation_engine_sig.Flatten_Sig.Optimiser.Operator.Symbol.Shape.Type)

Module Shape.Type

Type definition
type state =
  1. | Valid
  2. | Invalid
    (*

    TODO

    *)

TODO

and block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}

The block type keeps a reference to a block of memory and to the nodes sharing that block.
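
A small sketch that reads the bookkeeping fields of a block record, assuming a flattened engine module G built with Owl_computation_cpu_engine.Make (Owl_algodiff_primal_ops.S) from the owl package, which re-exports the block type and its fields at the top level; the fields used are exactly those listed above.

module G = Owl_computation_cpu_engine.Make (Owl_algodiff_primal_ops.S)

let describe_block (b : G.block) =
  let open G in
  Printf.printf "block %d: size %d, %d node(s) attached, %s\n"
    b.block_id b.size (List.length b.nodes)
    (match b.active with None -> "free" | Some _ -> "in use")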

and attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}

TODO

and arr =
  1. | Arr of t
and elt =
  1. | Elt of t
and op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
    (*

    TODO

    *)
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Symbol/Shape/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Symbol/Shape/index.html deleted file mode 100644 index e47b18820..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Symbol/Shape/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Shape (owl-base.Owl_computation_engine_sig.Flatten_Sig.Optimiser.Operator.Symbol.Shape)

Module Symbol.Shape

Core functions
val infer_shape : - Type.op -> - Type.attr Owl_graph.node array -> - int array option array

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Symbol/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Symbol/index.html deleted file mode 100644 index 3c31c7c3a..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/Symbol/index.html +++ /dev/null @@ -1,28 +0,0 @@ - -Symbol (owl-base.Owl_computation_engine_sig.Flatten_Sig.Optimiser.Operator.Symbol)

Module Operator.Symbol

Core functions
val op_to_str : Shape.Type.op -> string

TODO

val is_random_variable : Shape.Type.op -> bool

TODO

val refnum : 'a Owl_graph.node -> int

TODO

val node_shape : Shape.Type.attr Owl_graph.node -> int array

TODO

val node_numel : Shape.Type.attr Owl_graph.node -> int

TODO

val is_shape_unknown : Shape.Type.attr Owl_graph.node -> bool

TODO

val infer_shape_graph : Shape.Type.attr Owl_graph.node array -> unit

TODO

val shape_to_str : int array option array -> string

TODO

val node_to_str : Shape.Type.attr Owl_graph.node -> string

TODO

val node_to_arr : Shape.Type.t -> Shape.Type.arr

TODO

val arr_to_node : Shape.Type.arr -> Shape.Type.t

TODO

val node_to_elt : Shape.Type.t -> Shape.Type.elt

TODO

val elt_to_node : Shape.Type.elt -> Shape.Type.t

TODO

val make_node : - ?name:string -> - ?value:Shape.Type.Device.value array -> - ?shape:int array option array -> - ?freeze:bool -> - ?reuse:bool -> - ?state:Shape.Type.state -> - Shape.Type.op -> - Shape.Type.attr Owl_graph.node

TODO

val make_then_connect : - ?shape:int array option array -> - Shape.Type.op -> - Shape.Type.attr Owl_graph.node array -> - Shape.Type.attr Owl_graph.node

TODO

val var_arr : ?shape:int array -> string -> Shape.Type.arr

TODO

val var_elt : string -> Shape.Type.elt

TODO

val const_arr : string -> Shape.Type.Device.A.arr -> Shape.Type.arr

TODO

val const_elt : string -> Shape.Type.Device.A.elt -> Shape.Type.elt

TODO

val new_block_id : unit -> int

new_block_id () returns an unused block id.

val make_empty_block : ?block_id:int -> int -> Shape.Type.block

make_empty_block s returns an empty block of memory of size s.

val make_value_block : - Shape.Type.Device.value -> - Shape.Type.attr Owl_graph.node -> - unit

make_value_block value node creates a block of memory initialised with value and links the new block to node.

get_block node returns the memory block allocated to node. If no block is allocated, throws an exception.

val add_node_to_block : - Shape.Type.attr Owl_graph.node -> - Shape.Type.block -> - unit

Links a node to a reusable block and initialises the node's memory within the block's memory.

val get_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node option

Returns the node that is currently using the memory of the block.

val set_active_node : - Shape.Type.block -> - Shape.Type.attr Owl_graph.node -> - unit

Updates the node that is currently using the block of memory.

val get_block_id : Shape.Type.attr Owl_graph.node -> int

get_block_id node returns the id of the block assigned to node. If node has not been assigned yet, returns -1.

val set_value : - Shape.Type.attr Owl_graph.node -> - Shape.Type.Device.value array -> - unit

TODO

val set_operator : Shape.Type.attr Owl_graph.node -> Shape.Type.op -> unit

TODO

TODO

val set_reuse : Shape.Type.attr Owl_graph.node -> bool -> unit

TODO

val get_reuse : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_shared : Shape.Type.attr Owl_graph.node -> bool

TODO

val get_shared_nodes : - Shape.Type.attr Owl_graph.node -> - Shape.Type.attr Owl_graph.node array

get_shared_nodes node returns the nodes sharing the same block of memory as node.
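
A minimal sketch combining is_shared and get_shared_nodes, assuming a flattened engine module G = Owl_computation_cpu_engine.Make (Owl_algodiff_primal_ops.S) from the owl package:

module G = Owl_computation_cpu_engine.Make (Owl_algodiff_primal_ops.S)

let report_sharing node =
  if G.is_shared node then
    Printf.printf "%d node(s) share a memory block with this node\n"
      (Array.length (G.get_shared_nodes node))
  else
    print_endline "this node does not share its memory block"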

val is_var : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_const : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_arr : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_elt : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_assigned : Shape.Type.attr Owl_graph.node -> bool

is_assigned node checks if a block of memory has been assigned to node.

val check_assigned : Shape.Type.attr Owl_graph.node -> unit

check_assigned node throws an exception if node has not been assigned to a block.

val is_valid : Shape.Type.attr Owl_graph.node -> bool

TODO

val validate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate_graph : Shape.Type.attr Owl_graph.node -> unit

TODO

val is_freeze : Shape.Type.attr Owl_graph.node -> bool

TODO

val freeze : Shape.Type.attr Owl_graph.node -> unit

TODO

val freeze_descendants : Shape.Type.attr Owl_graph.node array -> unit

TODO

val freeze_ancestors : Shape.Type.attr Owl_graph.node array -> unit

TODO

TODO

TODO

val unsafe_assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_elt : Shape.Type.elt -> Shape.Type.Device.A.elt -> unit

TODO

val float_to_elt : float -> Shape.Type.elt

TODO

val elt_to_float : Shape.Type.elt -> float

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/index.html deleted file mode 100644 index 8937937bd..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/Operator/index.html +++ /dev/null @@ -1,420 +0,0 @@ - -Operator (owl-base.Owl_computation_engine_sig.Flatten_Sig.Optimiser.Operator)

Module Optimiser.Operator

Vectorised functions
val empty : int array -> Symbol.Shape.Type.arr

TODO

val zeros : int array -> Symbol.Shape.Type.arr

TODO

val ones : int array -> Symbol.Shape.Type.arr

TODO

val create : int array -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val sequential : - ?a:Symbol.Shape.Type.elt -> - ?step:Symbol.Shape.Type.elt -> - int array -> - Symbol.Shape.Type.arr

TODO

val uniform : - ?a:Symbol.Shape.Type.elt -> - ?b:Symbol.Shape.Type.elt -> - int array -> - Symbol.Shape.Type.arr

TODO

val gaussian : - ?mu:Symbol.Shape.Type.elt -> - ?sigma:Symbol.Shape.Type.elt -> - int array -> - Symbol.Shape.Type.arr

TODO

val bernoulli : ?p:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val init : int array -> (int -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr

TODO

val init_nd : - int array -> - (int array -> Symbol.Shape.Type.elt) -> - Symbol.Shape.Type.arr

TODO

val shape : Symbol.Shape.Type.arr -> int array

TODO

val numel : Symbol.Shape.Type.arr -> int

TODO

TODO

val set : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.elt -> unit

TODO

val get_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val set_slice : - int list list -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - unit

TODO

TODO

val set_fancy : - Owl_types.index list -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - unit

TODO

val copy_ : out:'a -> 'b -> 'c

TODO

val reset : Symbol.Shape.Type.arr -> unit

TODO

val reshape : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val tile : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val repeat : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val pad : - ?v:Symbol.Shape.Type.elt -> - int list list -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val expand : ?hi:bool -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr

TODO

val squeeze : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val concatenate : - ?axis:int -> - Symbol.Shape.Type.arr array -> - Symbol.Shape.Type.arr

TODO

val stack : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr

TODO

TODO

val split : ?axis:int -> 'a -> 'b -> 'c

TODO

val draw : - ?axis:int -> - Symbol.Shape.Type.arr -> - int -> - Symbol.Shape.Type.arr * 'a array

TODO

TODO

delay f x returns f x. It allows a function that is not tracked by the computation graph to be used, delaying its evaluation. The output must have the same shape as the input.

delay_array out_shape f x works in the same way as delay but is applied to an array of lazy ndarrays. It requires the shape of the output as an argument.

val lazy_print : - ?max_row:int -> - ?max_col:int -> - ?header:bool -> - ?fmt:(Symbol.Shape.Type.Device.A.elt -> string) -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

lazy_print x prints the output of x when it is evaluated. It is implemented as an identity node. For information about the optional parameters, refer to the print function of the Ndarray module.
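
A minimal sketch of delay_array for a custom two-input operation, assuming a flattened engine module G = Owl_computation_cpu_engine.Make (Owl_algodiff_primal_ops.S) from the owl package and assuming the type int array -> (Device.A.arr array -> Device.A.arr) -> arr array -> arr, since the val line is not reproduced above.

module G = Owl_computation_cpu_engine.Make (Owl_algodiff_primal_ops.S)
module N = Owl_algodiff_primal_ops.S

let () =
  let x = G.var_arr ~shape:[| 2; 2 |] "x" in
  let y = G.var_arr ~shape:[| 2; 2 |] "y" in
  (* an untracked custom op over the raw ndarrays; its output shape
     must be supplied explicitly as the first argument *)
  let custom inputs = N.add inputs.(0) inputs.(1) in
  let _z = G.delay_array [| 2; 2 |] custom [| x; y |] in
  ()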

val print : ?max_row:'a -> ?max_col:'b -> ?header:'c -> ?fmt:'d -> 'e -> unit

TODO

val min : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val max : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val sum : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val sum_reduce : - ?axis:int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val log_sum_exp : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

TODO

val elt_less_equal_scalar : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.elt -> - Symbol.Shape.Type.arr

TODO

val elt_greater_equal_scalar : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.elt -> - Symbol.Shape.Type.arr

TODO

val conv1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val conv2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val conv3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val transpose_conv1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val transpose_conv2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val transpose_conv3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val dilated_conv1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val dilated_conv2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val dilated_conv3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val max_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val upsampling2d : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val upsampling2d_backward : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val row_num : Symbol.Shape.Type.arr -> int

TODO

val col_num : Symbol.Shape.Type.arr -> int

TODO

val rows : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val copy_row_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

val copy_col_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

TODO

val transpose : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val to_rows : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val to_cols : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val of_array : Symbol.Shape.Type.elt array -> int array -> Symbol.Shape.Type.arr

TODO

val of_arrays : Symbol.Shape.Type.elt array array -> Symbol.Shape.Type.arr

TODO

val to_arrays : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt array array

TODO

Scalar functions
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/index.html deleted file mode 100644 index ca12ce169..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Optimiser/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Optimiser (owl-base.Owl_computation_engine_sig.Flatten_Sig.Optimiser)

Module Flatten_Sig.Optimiser

Core functions
val estimate_complexity : 'a Owl_graph.node array -> int * int

TODO

val optimise_nodes : Operator.Symbol.Shape.Type.attr Owl_graph.node array -> unit

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Scalar/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Scalar/index.html deleted file mode 100644 index 4bd744263..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Scalar/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -Scalar (owl-base.Owl_computation_engine_sig.Flatten_Sig.Scalar)

Module Flatten_Sig.Scalar

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Shape/Type/Device/A/Linalg/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Shape/Type/Device/A/Linalg/index.html deleted file mode 100644 index 8e75bfea7..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Shape/Type/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_engine_sig.Flatten_Sig.Shape.Type.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Shape/Type/Device/A/Mat/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Shape/Type/Device/A/Mat/index.html deleted file mode 100644 index 01576aaa8..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Shape/Type/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_engine_sig.Flatten_Sig.Shape.Type.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Shape/Type/Device/A/Scalar/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Shape/Type/Device/A/Scalar/index.html deleted file mode 100644 index a36dbb512..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Shape/Type/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_engine_sig.Flatten_Sig.Shape.Type.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Shape/Type/Device/A/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Shape/Type/Device/A/index.html deleted file mode 100644 index 252847b07..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Shape/Type/Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_engine_sig.Flatten_Sig.Shape.Type.Device.A)

Module Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Shape/Type/Device/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Shape/Type/Device/index.html deleted file mode 100644 index 69a6ed27c..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Shape/Type/Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_computation_engine_sig.Flatten_Sig.Shape.Type.Device)

Module Type.Device

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Shape/Type/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Shape/Type/index.html deleted file mode 100644 index b26e035e2..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Shape/Type/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Type (owl-base.Owl_computation_engine_sig.Flatten_Sig.Shape.Type)

Module Shape.Type

Type definition
type state =
  1. | Valid
  2. | Invalid
    (*

    TODO

    *)

TODO

and block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}

The block type keeps a reference to a block of memory and to the nodes sharing that block.
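
To illustrate the memory-sharing idea, here is a simplified, self-contained analogue (not Owl's actual definitions): several graph nodes can point at one block, and active records which node currently owns the underlying buffer.

(* Simplified analogue only; [float array] stands in for Device.value
   and [node] for the graph node type [t]. *)
type node = { id : int; mutable block : block option }

and block =
  { size : int
  ; block_id : int
  ; mutable active : node option
  ; mutable memory : float array
  ; mutable nodes : node list
  }

let assign_to_block b n =
  (* register [n] as a user of the block and hand it the buffer *)
  b.nodes <- n :: b.nodes;
  b.active <- Some n;
  n.block <- Some b

let () =
  let b = { size = 16; block_id = 0; active = None;
            memory = Array.make 16 0.; nodes = [] } in
  let n1 = { id = 1; block = None } in
  let n2 = { id = 2; block = None } in
  (* n1 and n2 reuse the same 16-element buffer at different times *)
  assign_to_block b n1;
  assign_to_block b n2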

and attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}

TODO

and arr =
  1. | Arr of t
and elt =
  1. | Elt of t
and op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
    (*

    TODO

    *)
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Shape/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Shape/index.html deleted file mode 100644 index 08124402b..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Shape/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Shape (owl-base.Owl_computation_engine_sig.Flatten_Sig.Shape)

Module Flatten_Sig.Shape

Core functions
val infer_shape : Type.op -> Type.attr Owl_graph.node array -> int array option array

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Symbol/Shape/Type/Device/A/Linalg/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Symbol/Shape/Type/Device/A/Linalg/index.html deleted file mode 100644 index 80bee6e74..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Symbol/Shape/Type/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_engine_sig.Flatten_Sig.Symbol.Shape.Type.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Symbol/Shape/Type/Device/A/Mat/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Symbol/Shape/Type/Device/A/Mat/index.html deleted file mode 100644 index 2cfc2db7a..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Symbol/Shape/Type/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_engine_sig.Flatten_Sig.Symbol.Shape.Type.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Symbol/Shape/Type/Device/A/Scalar/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Symbol/Shape/Type/Device/A/Scalar/index.html deleted file mode 100644 index 8ebcf6e14..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Symbol/Shape/Type/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_engine_sig.Flatten_Sig.Symbol.Shape.Type.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Symbol/Shape/Type/Device/A/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Symbol/Shape/Type/Device/A/index.html deleted file mode 100644 index 9fd7a1ec0..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Symbol/Shape/Type/Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_engine_sig.Flatten_Sig.Symbol.Shape.Type.Device.A)

Module Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Symbol/Shape/Type/Device/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Symbol/Shape/Type/Device/index.html deleted file mode 100644 index 91adaa7af..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Symbol/Shape/Type/Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_computation_engine_sig.Flatten_Sig.Symbol.Shape.Type.Device)

Module Type.Device

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Symbol/Shape/Type/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Symbol/Shape/Type/index.html deleted file mode 100644 index ee97d017a..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Symbol/Shape/Type/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Type (owl-base.Owl_computation_engine_sig.Flatten_Sig.Symbol.Shape.Type)

Module Shape.Type

Type definition
type state =
  1. | Valid
  2. | Invalid
    (*

    TODO

    *)

TODO

and block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}

block type keeps a reference to a block of memory and to the nodes sharing that block.

and attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}

TODO

and arr =
  1. | Arr of t
and elt =
  1. | Elt of t
and op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
    (*

    TODO

    *)
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Symbol/Shape/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Symbol/Shape/index.html deleted file mode 100644 index 2a7a20f3a..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Symbol/Shape/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Shape (owl-base.Owl_computation_engine_sig.Flatten_Sig.Symbol.Shape)

Module Symbol.Shape

Core functions
val infer_shape : Type.op -> Type.attr Owl_graph.node array -> int array option array

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Symbol/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Symbol/index.html deleted file mode 100644 index b1f334d5c..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Symbol/index.html +++ /dev/null @@ -1,28 +0,0 @@ - -Symbol (owl-base.Owl_computation_engine_sig.Flatten_Sig.Symbol)

Module Flatten_Sig.Symbol

Core functions
val op_to_str : Shape.Type.op -> string

TODO

val is_random_variable : Shape.Type.op -> bool

TODO

val refnum : 'a Owl_graph.node -> int

TODO

val node_shape : Shape.Type.attr Owl_graph.node -> int array

TODO

val node_numel : Shape.Type.attr Owl_graph.node -> int

TODO

val is_shape_unknown : Shape.Type.attr Owl_graph.node -> bool

TODO

val infer_shape_graph : Shape.Type.attr Owl_graph.node array -> unit

TODO

val shape_to_str : int array option array -> string

TODO

val node_to_str : Shape.Type.attr Owl_graph.node -> string

TODO

val node_to_arr : Shape.Type.t -> Shape.Type.arr

TODO

val arr_to_node : Shape.Type.arr -> Shape.Type.t

TODO

val node_to_elt : Shape.Type.t -> Shape.Type.elt

TODO

val elt_to_node : Shape.Type.elt -> Shape.Type.t

TODO

val make_node : ?name:string -> ?value:Shape.Type.Device.value array -> ?shape:int array option array -> ?freeze:bool -> ?reuse:bool -> ?state:Shape.Type.state -> Shape.Type.op -> Shape.Type.attr Owl_graph.node

TODO

val make_then_connect : ?shape:int array option array -> Shape.Type.op -> Shape.Type.attr Owl_graph.node array -> Shape.Type.attr Owl_graph.node

TODO

val var_arr : ?shape:int array -> string -> Shape.Type.arr

TODO

val var_elt : string -> Shape.Type.elt

TODO

val const_arr : string -> Shape.Type.Device.A.arr -> Shape.Type.arr

TODO

val const_elt : string -> Shape.Type.Device.A.elt -> Shape.Type.elt

TODO

val new_block_id : unit -> int

new_block_id () returns an unused block id.

val make_empty_block : ?block_id:int -> int -> Shape.Type.block

make_empty_block s returns an empty block of memory of size s.

val make_value_block : Shape.Type.Device.value -> Shape.Type.attr Owl_graph.node -> unit

make_value_block value node creates a block of memory initialised with value and links the new block to node.

get_block node returns the memory block allocated to node. If no block is allocated, throws an exception.

val add_node_to_block : Shape.Type.attr Owl_graph.node -> Shape.Type.block -> unit

Link a node to a reusable block and initialise the node's memory within the block's memory.

val get_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node option

Return the node that is currently using the memory of the block.

val set_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node -> unit

Update the node that is currently using the block of memory.

val get_block_id : Shape.Type.attr Owl_graph.node -> int

get_block_id node returns the id of the block assigned to node. If node has not been assigned yet, returns -1.

val set_value : Shape.Type.attr Owl_graph.node -> Shape.Type.Device.value array -> unit

TODO

val set_operator : Shape.Type.attr Owl_graph.node -> Shape.Type.op -> unit

TODO

TODO

val set_reuse : Shape.Type.attr Owl_graph.node -> bool -> unit

TODO

val get_reuse : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_shared : Shape.Type.attr Owl_graph.node -> bool

TODO

val get_shared_nodes : Shape.Type.attr Owl_graph.node -> Shape.Type.attr Owl_graph.node array

get_shared_nodes node returns the nodes sharing the same block of memory as node.

val is_var : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_const : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_arr : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_elt : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_assigned : Shape.Type.attr Owl_graph.node -> bool

is_assigned node checks if a block of memory has been assigned to node.

val check_assigned : Shape.Type.attr Owl_graph.node -> unit

check_assigned node throws an exception if node has not been assigned to a block.
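
Taken together, the block-management functions above describe a small allocation workflow. The following is a minimal hypothetical OCaml sketch, not taken from the Owl sources: it assumes S is a module implementing this Symbol signature and that Shape.Type.t is the same node type accepted by the block functions; the shape, block size and names are invented for illustration.

let () =
  (* a symbolic 3x3 variable and its underlying graph node *)
  let x = S.var_arr ~shape:[| 3; 3 |] "x" in
  let node = S.arr_to_node x in
  (* allocate a fresh, empty block of 9 elements under a new id *)
  let id = S.new_block_id () in
  let block = S.make_empty_block ~block_id:id 9 in
  (* attach the node to the block: per the descriptions above, the node is now
     assigned and reports the block's id *)
  S.add_node_to_block node block;
  assert (S.is_assigned node);
  assert (S.get_block_id node = id)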

val is_valid : Shape.Type.attr Owl_graph.node -> bool

TODO

val validate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate_graph : Shape.Type.attr Owl_graph.node -> unit

TODO

val is_freeze : Shape.Type.attr Owl_graph.node -> bool

TODO

val freeze : Shape.Type.attr Owl_graph.node -> unit

TODO

val freeze_descendants : Shape.Type.attr Owl_graph.node array -> unit

TODO

val freeze_ancestors : Shape.Type.attr Owl_graph.node array -> unit

TODO

TODO

TODO

val unsafe_assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_elt : Shape.Type.elt -> Shape.Type.Device.A.elt -> unit

TODO

val float_to_elt : float -> Shape.Type.elt

TODO

val elt_to_float : Shape.Type.elt -> float

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Type/Device/A/Linalg/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Type/Device/A/Linalg/index.html deleted file mode 100644 index 64b46743f..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Type/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_engine_sig.Flatten_Sig.Type.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Type/Device/A/Mat/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Type/Device/A/Mat/index.html deleted file mode 100644 index 0f22eff58..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Type/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_engine_sig.Flatten_Sig.Type.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Type/Device/A/Scalar/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Type/Device/A/Scalar/index.html deleted file mode 100644 index dc2ddb3ea..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Type/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_engine_sig.Flatten_Sig.Type.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Type/Device/A/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Type/Device/A/index.html deleted file mode 100644 index e1b989ac5..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Type/Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_engine_sig.Flatten_Sig.Type.Device.A)

Module Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Type/Device/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Type/Device/index.html deleted file mode 100644 index 5d61ce154..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Type/Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_computation_engine_sig.Flatten_Sig.Type.Device)

Module Type.Device

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Type/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Type/index.html deleted file mode 100644 index f18196fd0..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/Type/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Type (owl-base.Owl_computation_engine_sig.Flatten_Sig.Type)

Module Flatten_Sig.Type

Type definition
type state =
  1. | Valid
  2. | Invalid
    (*

    TODO

    *)

TODO

and block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}

block type keeps a reference to a block of memory and to the nodes sharing that block.

and attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}

TODO

and arr =
  1. | Arr of t
and elt =
  1. | Elt of t
and op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
    (*

    TODO

    *)
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/index.html b/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/index.html deleted file mode 100644 index 110ae4755..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Flatten_Sig/index.html +++ /dev/null @@ -1,485 +0,0 @@ - -Flatten_Sig (owl-base.Owl_computation_engine_sig.Flatten_Sig)

Module type Owl_computation_engine_sig.Flatten_Sig

include Owl_types_computation_engine.Sig
Core evaluation functions of the engine

TODO

TODO

val eval_graph : Graph.graph -> unit

TODO

include Owl_computation_graph_sig.Sig
Type definition
type graph

TODO

Core functions
val shape_or_value : Optimiser.Operator.Symbol.Shape.Type.t -> string

TODO

val graph_to_dot : graph -> string

TODO

val graph_to_trace : graph -> string

TODO

val save_graph : 'a -> string -> unit

TODO

val load_graph : string -> 'a * 'b

TODO

val invalidate_rvs : graph -> unit

TODO

val is_iopair_safe : 'a Owl_graph.node -> 'a Owl_graph.node -> bool

TODO

val update_iopair : graph -> unit

TODO

val remove_unused_iopair : 'a Owl_graph.node array -> 'b array -> 'a Owl_graph.node array * 'b array

TODO

val optimise : graph -> unit

TODO

include Owl_computation_optimiser_sig.Sig
Core functions
val estimate_complexity : 'a Owl_graph.node array -> int * int

TODO

val optimise_nodes : Operator.Symbol.Shape.Type.attr Owl_graph.node array -> unit

TODO

include Owl_computation_operator_sig.Sig
Vectorised functions
val empty : int array -> Symbol.Shape.Type.arr

TODO

val zeros : int array -> Symbol.Shape.Type.arr

TODO

val ones : int array -> Symbol.Shape.Type.arr

TODO

val create : int array -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val sequential : ?a:Symbol.Shape.Type.elt -> ?step:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val uniform : ?a:Symbol.Shape.Type.elt -> ?b:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val gaussian : ?mu:Symbol.Shape.Type.elt -> ?sigma:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val bernoulli : ?p:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val init : int array -> (int -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr

TODO

val init_nd : int array -> (int array -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr

TODO

val shape : Symbol.Shape.Type.arr -> int array

TODO

val numel : Symbol.Shape.Type.arr -> int

TODO

TODO

val set : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.elt -> unit

TODO

val get_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val set_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit

TODO

TODO

val set_fancy : Owl_types.index list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit

TODO

val copy_ : out:'a -> 'b -> 'c

TODO

val reset : Symbol.Shape.Type.arr -> unit

TODO

val reshape : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val tile : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val repeat : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val pad : ?v:Symbol.Shape.Type.elt -> int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val expand : ?hi:bool -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr

TODO

val squeeze : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val concatenate : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr

TODO

val stack : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr

TODO

TODO

val split : ?axis:int -> 'a -> 'b -> 'c

TODO

val draw : ?axis:int -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr * 'a array

TODO

TODO

delay f x returns f x. It makes it possible to use a function that is not tracked by the computation graph and to delay its evaluation. The output must have the same shape as the input.

delay_array out_shape f x works in the same way as delay but is applied to an array of ndarrays. It requires the shape of the output as an argument.
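
As a concrete illustration of delay, the following is a minimal hypothetical sketch. It assumes G is a concrete instance of this engine signature (for example, built by applying the CPU engine functor to a dense single-precision ndarray module A); neither G nor A is part of this diff, only the documented call form delay f x is.

let x = G.var_arr ~shape:[| 10 |] "x"

(* A.sin is an untracked ndarray function; it runs outside the graph when the
   result is evaluated, and its output keeps the shape of its input, as delay
   requires *)
let y = G.delay A.sin x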

val lazy_print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(Symbol.Shape.Type.Device.A.elt -> string) -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

lazy_print x prints the output of x when it is evaluated. It is implemented as an identity node. For information about the optional parameters, refer to the print function of the Ndarray module.
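
Reusing the hypothetical G and x from the sketch above, lazy_print slots into a pipeline as an identity node whose only effect is printing when the graph is evaluated:

let x_printed = G.lazy_print ~header:true x
(* x_printed holds the same value as x; printing happens as a side effect
   each time the surrounding graph is evaluated *)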

val print : ?max_row:'a -> ?max_col:'b -> ?header:'c -> ?fmt:'d -> 'e -> unit

TODO

val min : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val sum : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val sum_reduce : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val log_sum_exp : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

TODO

val elt_less_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val elt_greater_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val upsampling2d : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val upsampling2d_backward : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val row_num : Symbol.Shape.Type.arr -> int

TODO

val col_num : Symbol.Shape.Type.arr -> int

TODO

val rows : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val copy_row_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

val copy_col_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

TODO

val transpose : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val to_rows : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val to_cols : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val of_array : Symbol.Shape.Type.elt array -> int array -> Symbol.Shape.Type.arr

TODO

val of_arrays : Symbol.Shape.Type.elt array array -> Symbol.Shape.Type.arr

TODO

val to_arrays : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt array array

TODO

Scalar functions
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
include Owl_computation_symbol_sig.Sig
Core functions
val op_to_str : Shape.Type.op -> string

TODO

val is_random_variable : Shape.Type.op -> bool

TODO

val refnum : 'a Owl_graph.node -> int

TODO

val node_shape : Shape.Type.attr Owl_graph.node -> int array

TODO

val node_numel : Shape.Type.attr Owl_graph.node -> int

TODO

val is_shape_unknown : Shape.Type.attr Owl_graph.node -> bool

TODO

val infer_shape_graph : Shape.Type.attr Owl_graph.node array -> unit

TODO

val shape_to_str : int array option array -> string

TODO

val node_to_str : Shape.Type.attr Owl_graph.node -> string

TODO

val node_to_arr : Shape.Type.t -> Shape.Type.arr

TODO

val arr_to_node : Shape.Type.arr -> Shape.Type.t

TODO

val node_to_elt : Shape.Type.t -> Shape.Type.elt

TODO

val elt_to_node : Shape.Type.elt -> Shape.Type.t

TODO

val make_node : ?name:string -> ?value:Shape.Type.Device.value array -> ?shape:int array option array -> ?freeze:bool -> ?reuse:bool -> ?state:Shape.Type.state -> Shape.Type.op -> Shape.Type.attr Owl_graph.node

TODO

val make_then_connect : ?shape:int array option array -> Shape.Type.op -> Shape.Type.attr Owl_graph.node array -> Shape.Type.attr Owl_graph.node

TODO

val var_arr : ?shape:int array -> string -> Shape.Type.arr

TODO

val var_elt : string -> Shape.Type.elt

TODO

val const_arr : string -> Shape.Type.Device.A.arr -> Shape.Type.arr

TODO

val const_elt : string -> Shape.Type.Device.A.elt -> Shape.Type.elt

TODO

val new_block_id : unit -> int

new_block_id () returns an unused block id.

val make_empty_block : ?block_id:int -> int -> Shape.Type.block

make_empty_block s returns an empty block of memory of size s.

val make_value_block : Shape.Type.Device.value -> Shape.Type.attr Owl_graph.node -> unit

make_value_block value node creates a block of memory initialised with value and links the new block to node.

get_block node returns the memory block allocated to node. If no block is allocated, throws an exception.

val add_node_to_block : Shape.Type.attr Owl_graph.node -> Shape.Type.block -> unit

Links a node to a reusable block and initialises the node's memory on the memory of the block.

val get_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node option

Return the node that is currently using the memory of the block.

val set_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node -> unit

Update the node that is currently using the block of memory.

val get_block_id : Shape.Type.attr Owl_graph.node -> int

get_block_id node returns the id of the block assigned to node. If node has not been assigned yet, returns -1.
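
The block-management functions above fit together roughly as sketched below. This is a minimal sketch, not part of this signature: the module alias G, the functor application Owl_computation_cpu_engine.Make (Owl_algodiff_primal_ops.S) from the owl package, and the assumption that these Symbol functions are exposed at the top level of the instantiated engine are all assumptions.

```ocaml
(* Sketch only: G is assumed to be a concrete engine exposing this signature. *)
module G = Owl_computation_cpu_engine.Make (Owl_algodiff_primal_ops.S)

let () =
  (* build a variable node and get its underlying graph node *)
  let x = G.var_arr ~shape:[| 3; 4 |] "x" in
  let n = G.arr_to_node x in
  (* no memory block has been assigned yet: get_block_id reports -1 *)
  assert (G.get_block_id n = -1);
  assert (not (G.is_assigned n));
  (* allocate an empty block large enough for the node and attach the node *)
  let blk = G.make_empty_block (G.node_numel n) in
  G.add_node_to_block n blk;
  (* the node is now assigned; the block tracks which node currently uses it *)
  assert (G.is_assigned n);
  (match G.get_active_node blk with
   | Some m -> assert (m == n)
   | None -> G.set_active_node blk n);
  Printf.printf "x uses block %d\n" (G.get_block_id n)
```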

val set_value : Shape.Type.attr Owl_graph.node -> Shape.Type.Device.value array -> unit

TODO

val set_operator : Shape.Type.attr Owl_graph.node -> Shape.Type.op -> unit

TODO

TODO

val set_reuse : Shape.Type.attr Owl_graph.node -> bool -> unit

TODO

val get_reuse : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_shared : Shape.Type.attr Owl_graph.node -> bool

TODO

val get_shared_nodes : Shape.Type.attr Owl_graph.node -> Shape.Type.attr Owl_graph.node array

get_shared_nodes node returns the nodes sharing the same block of memory as node.

val is_var : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_const : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_arr : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_elt : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_assigned : Shape.Type.attr Owl_graph.node -> bool

is_assigned node checks if a block of memory has been assigned to node.

val check_assigned : Shape.Type.attr Owl_graph.node -> unit

check_assigned node throws an exception if node has not been assigned to a block.

val is_valid : Shape.Type.attr Owl_graph.node -> bool

TODO

val validate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate_graph : Shape.Type.attr Owl_graph.node -> unit

TODO

val is_freeze : Shape.Type.attr Owl_graph.node -> bool

TODO

val freeze : Shape.Type.attr Owl_graph.node -> unit

TODO

val freeze_descendants : Shape.Type.attr Owl_graph.node array -> unit

TODO

val freeze_ancestors : Shape.Type.attr Owl_graph.node array -> unit

TODO

TODO

TODO

val unsafe_assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_elt : Shape.Type.elt -> Shape.Type.Device.A.elt -> unit

TODO

val float_to_elt : float -> Shape.Type.elt

TODO

val elt_to_float : Shape.Type.elt -> float

TODO

include Owl_computation_shape_sig.Sig
Core functions
val infer_shape : Type.op -> Type.attr Owl_graph.node array -> int array option array

TODO

include Owl_computation_type_sig.Sig
Type definition
type state =
  1. | Valid
  2. | Invalid
    (*

    TODO

    *)

TODO

and block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}

block type keeps a reference to a block of memory and to the nodes sharing that block.

and attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}

TODO

and arr =
  1. | Arr of t
and elt =
  1. | Elt of t
and op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
    (*

    TODO

    *)
include Owl_types_computation_device.Sig
Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Linalg/index.html b/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Linalg/index.html deleted file mode 100644 index 0a7bea658..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Linalg/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Linalg (owl-base.Owl_computation_engine_sig.Make_Graph_Sig.Optimiser.Operator.Linalg)

Module Operator.Linalg

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Mat/index.html b/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Mat/index.html deleted file mode 100644 index 14a11091c..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_engine_sig.Make_Graph_Sig.Optimiser.Operator.Mat)

Module Operator.Mat

val eye : int -> Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Scalar/index.html b/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Scalar/index.html deleted file mode 100644 index ed2140ac5..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Scalar/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -Scalar (owl-base.Owl_computation_engine_sig.Make_Graph_Sig.Optimiser.Operator.Scalar)

Module Operator.Scalar

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html b/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html deleted file mode 100644 index 28f4c1ba2..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_engine_sig.Make_Graph_Sig.Optimiser.Operator.Symbol.Shape.Type.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html b/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html deleted file mode 100644 index bf58e592c..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_engine_sig.Make_Graph_Sig.Optimiser.Operator.Symbol.Shape.Type.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html b/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html deleted file mode 100644 index 97d0c4fda..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_engine_sig.Make_Graph_Sig.Optimiser.Operator.Symbol.Shape.Type.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html b/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html deleted file mode 100644 index e43300f97..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_engine_sig.Make_Graph_Sig.Optimiser.Operator.Symbol.Shape.Type.Device.A)

Module Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/index.html b/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/index.html deleted file mode 100644 index f0770fde8..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Symbol/Shape/Type/Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_computation_engine_sig.Make_Graph_Sig.Optimiser.Operator.Symbol.Shape.Type.Device)

Module Type.Device

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Symbol/Shape/Type/index.html b/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Symbol/Shape/Type/index.html deleted file mode 100644 index f53acc297..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Symbol/Shape/Type/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Type (owl-base.Owl_computation_engine_sig.Make_Graph_Sig.Optimiser.Operator.Symbol.Shape.Type)

Module Shape.Type

Type definition
type state =
  1. | Valid
  2. | Invalid
    (*

    TODO

    *)

TODO

and block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}

block type keeps a reference to a block of memory and to the nodes sharing that block.

and attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}

TODO

and arr =
  1. | Arr of t
and elt =
  1. | Elt of t
and op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
    (*

    TODO

    *)
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Symbol/Shape/index.html b/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Symbol/Shape/index.html deleted file mode 100644 index cbb1660af..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Symbol/Shape/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Shape (owl-base.Owl_computation_engine_sig.Make_Graph_Sig.Optimiser.Operator.Symbol.Shape)

Module Symbol.Shape

Core functions
val infer_shape : Type.op -> Type.attr Owl_graph.node array -> int array option array

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Symbol/index.html b/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Symbol/index.html deleted file mode 100644 index 5c73606f1..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/Symbol/index.html +++ /dev/null @@ -1,28 +0,0 @@ - -Symbol (owl-base.Owl_computation_engine_sig.Make_Graph_Sig.Optimiser.Operator.Symbol)

Module Operator.Symbol

Core functions
val op_to_str : Shape.Type.op -> string

TODO

val is_random_variable : Shape.Type.op -> bool

TODO

val refnum : 'a Owl_graph.node -> int

TODO

val node_shape : Shape.Type.attr Owl_graph.node -> int array

TODO

val node_numel : Shape.Type.attr Owl_graph.node -> int

TODO

val is_shape_unknown : Shape.Type.attr Owl_graph.node -> bool

TODO

val infer_shape_graph : Shape.Type.attr Owl_graph.node array -> unit

TODO

val shape_to_str : int array option array -> string

TODO

val node_to_str : Shape.Type.attr Owl_graph.node -> string

TODO

val node_to_arr : Shape.Type.t -> Shape.Type.arr

TODO

val arr_to_node : Shape.Type.arr -> Shape.Type.t

TODO

val node_to_elt : Shape.Type.t -> Shape.Type.elt

TODO

val elt_to_node : Shape.Type.elt -> Shape.Type.t

TODO

val make_node : ?name:string -> ?value:Shape.Type.Device.value array -> ?shape:int array option array -> ?freeze:bool -> ?reuse:bool -> ?state:Shape.Type.state -> Shape.Type.op -> Shape.Type.attr Owl_graph.node

TODO

val make_then_connect : ?shape:int array option array -> Shape.Type.op -> Shape.Type.attr Owl_graph.node array -> Shape.Type.attr Owl_graph.node

TODO

val var_arr : ?shape:int array -> string -> Shape.Type.arr

TODO

val var_elt : string -> Shape.Type.elt

TODO

val const_arr : string -> Shape.Type.Device.A.arr -> Shape.Type.arr

TODO

val const_elt : string -> Shape.Type.Device.A.elt -> Shape.Type.elt

TODO

val new_block_id : unit -> int

new_block_id () returns an unused block id.

val make_empty_block : ?block_id:int -> int -> Shape.Type.block

make_empty_block s returns an empty block of memory of size s.

val make_value_block : Shape.Type.Device.value -> Shape.Type.attr Owl_graph.node -> unit

make_value_block value node creates a block of memory initialised with value and links the new block to node.

get_block node returns the memory block allocated to node. If no block is allocated, throws an exception.

val add_node_to_block : Shape.Type.attr Owl_graph.node -> Shape.Type.block -> unit

Links a node to a reusable block and initialises the node's memory on the memory of the block.

val get_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node option

Return the node that is currently using the memory of the block.

val set_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node -> unit

Update the node that is currently using the block of memory.

val get_block_id : Shape.Type.attr Owl_graph.node -> int

get_block_id node returns the id of the block assigned to node. If node has not been assigned yet, returns -1.

val set_value : Shape.Type.attr Owl_graph.node -> Shape.Type.Device.value array -> unit

TODO

val set_operator : Shape.Type.attr Owl_graph.node -> Shape.Type.op -> unit

TODO

TODO

val set_reuse : Shape.Type.attr Owl_graph.node -> bool -> unit

TODO

val get_reuse : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_shared : Shape.Type.attr Owl_graph.node -> bool

TODO

val get_shared_nodes : Shape.Type.attr Owl_graph.node -> Shape.Type.attr Owl_graph.node array

get_shared_nodes node returns the nodes sharing the same block of memory as node.

val is_var : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_const : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_arr : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_elt : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_assigned : Shape.Type.attr Owl_graph.node -> bool

is_assigned node checks if a block of memory has been assigned to node.

val check_assigned : Shape.Type.attr Owl_graph.node -> unit

check_assigned node throws an exception if node has not been assigned to a block.

val is_valid : Shape.Type.attr Owl_graph.node -> bool

TODO

val validate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate_graph : Shape.Type.attr Owl_graph.node -> unit

TODO

val is_freeze : Shape.Type.attr Owl_graph.node -> bool

TODO

val freeze : Shape.Type.attr Owl_graph.node -> unit

TODO

val freeze_descendants : Shape.Type.attr Owl_graph.node array -> unit

TODO

val freeze_ancestors : Shape.Type.attr Owl_graph.node array -> unit

TODO

TODO

TODO

val unsafe_assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_elt : Shape.Type.elt -> Shape.Type.Device.A.elt -> unit

TODO

val float_to_elt : float -> Shape.Type.elt

TODO

val elt_to_float : Shape.Type.elt -> float

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/index.html b/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/index.html deleted file mode 100644 index 22502f059..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/Operator/index.html +++ /dev/null @@ -1,420 +0,0 @@ - -Operator (owl-base.Owl_computation_engine_sig.Make_Graph_Sig.Optimiser.Operator)

Module Optimiser.Operator

Vectorised functions
val empty : int array -> Symbol.Shape.Type.arr

TODO

val zeros : int array -> Symbol.Shape.Type.arr

TODO

val ones : int array -> Symbol.Shape.Type.arr

TODO

val create : int array -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val sequential : ?a:Symbol.Shape.Type.elt -> ?step:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val uniform : ?a:Symbol.Shape.Type.elt -> ?b:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val gaussian : ?mu:Symbol.Shape.Type.elt -> ?sigma:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val bernoulli : ?p:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val init : int array -> (int -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr

TODO

val init_nd : int array -> (int array -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr

TODO

val shape : Symbol.Shape.Type.arr -> int array

TODO

val numel : Symbol.Shape.Type.arr -> int

TODO

TODO

val set : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.elt -> unit

TODO

val get_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val set_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit

TODO

TODO

val set_fancy : Owl_types.index list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit

TODO

val copy_ : out:'a -> 'b -> 'c

TODO

val reset : Symbol.Shape.Type.arr -> unit

TODO

val reshape : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val tile : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val repeat : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val pad : ?v:Symbol.Shape.Type.elt -> int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val expand : ?hi:bool -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr

TODO

val squeeze : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val concatenate : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr

TODO

val stack : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr

TODO

TODO

val split : ?axis:int -> 'a -> 'b -> 'c

TODO

val draw : ?axis:int -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr * 'a array

TODO

TODO

delay f x returns f x. It makes it possible to use a function that is not tracked by the computation graph and to delay its evaluation. The output must have the same shape as the input.

delay_array out_shape f x works in the same way as delay but is applied to an array of ndarrays. It needs the shape of the output as an argument.

val lazy_print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(Symbol.Shape.Type.Device.A.elt -> string) -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

lazy_print x prints the output of x when it is evaluated. It is implemented as an identity node. For information about the optional parameters, refer to the print function of the Ndarray module.
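
As a rough illustration of delay and lazy_print, the sketch below assumes an engine instantiated through the owl package; the module aliases G and A, the assumed signature of delay (an untracked Device.A.arr -> Device.A.arr function plus an input array), and eval_arr all come from outside this page and are assumptions rather than part of this signature.

```ocaml
(* Sketch only: G and A are assumed aliases; delay's signature and eval_arr
   are assumptions based on the descriptions above. *)
module G = Owl_computation_cpu_engine.Make (Owl_algodiff_primal_ops.S)
module A = Owl_algodiff_primal_ops.S

let () =
  let x = G.var_arr ~shape:[| 2; 3 |] "x" in
  (* delay runs an arbitrary, untracked ndarray function lazily;
     the output must keep the input's shape *)
  let y = G.delay (fun a -> A.mul_scalar a 2.) x in
  (* lazy_print is an identity node: z has the same value as y,
     but y's value is printed when the graph is evaluated *)
  let z = G.lazy_print ~header:true y in
  G.assign_arr x (A.ones [| 2; 3 |]);
  G.eval_arr [| z |]
```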

val print : ?max_row:'a -> ?max_col:'b -> ?header:'c -> ?fmt:'d -> 'e -> unit

TODO

val min : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val sum : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val sum_reduce : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val log_sum_exp : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

TODO

val elt_less_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val elt_greater_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val upsampling2d : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val upsampling2d_backward : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val row_num : Symbol.Shape.Type.arr -> int

TODO

val col_num : Symbol.Shape.Type.arr -> int

TODO

val rows : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val copy_row_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

val copy_col_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

TODO

val transpose : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val to_rows : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val to_cols : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val of_array : Symbol.Shape.Type.elt array -> int array -> Symbol.Shape.Type.arr

TODO

val of_arrays : Symbol.Shape.Type.elt array array -> Symbol.Shape.Type.arr

TODO

val to_arrays : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt array array

TODO

Scalar functions
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/index.html b/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/index.html deleted file mode 100644 index 207be9118..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/Optimiser/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Optimiser (owl-base.Owl_computation_engine_sig.Make_Graph_Sig.Optimiser)

Module Make_Graph_Sig.Optimiser

Core functions
val estimate_complexity : 'a Owl_graph.node array -> int * int

TODO

val optimise_nodes : Operator.Symbol.Shape.Type.attr Owl_graph.node array -> unit

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/index.html b/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/index.html deleted file mode 100644 index f4c44b09b..000000000 --- a/owl-base/Owl_computation_engine_sig/module-type-Make_Graph_Sig/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Make_Graph_Sig (owl-base.Owl_computation_engine_sig.Make_Graph_Sig)

Module type Owl_computation_engine_sig.Make_Graph_Sig

include Owl_computation_graph_sig.Sig
Type definition
type graph

TODO

Core functions
val shape_or_value : Optimiser.Operator.Symbol.Shape.Type.t -> string

TODO

val graph_to_dot : graph -> string

TODO

val graph_to_trace : graph -> string

TODO

val save_graph : 'a -> string -> unit

TODO

val load_graph : string -> 'a * 'b

TODO

val invalidate_rvs : graph -> unit

TODO

val is_iopair_safe : 'a Owl_graph.node -> 'a Owl_graph.node -> bool

TODO

val update_iopair : graph -> unit

TODO

val remove_unused_iopair : 'a Owl_graph.node array -> 'b array -> 'a Owl_graph.node array * 'b array

TODO

val optimise : graph -> unit

TODO
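All entries above are placeholders, so the following sketch only shows how these graph functions are typically combined. The engine instantiation and make_graph (taking ~input and ~output node arrays plus a name) are assumptions from the wider Owl API, not facts documented on this page.

module CG = Owl_computation_cpu_engine.Make (Owl_algodiff_primal_ops.S)

let () =
  let x = CG.var_arr "x" ~shape:[| 3; 3 |] in
  let y = CG.mul x x in
  (* make_graph is assumed; it wraps input and output nodes into a graph value *)
  let g =
    CG.make_graph
      ~input:[| CG.arr_to_node x |]
      ~output:[| CG.arr_to_node y |]
      "demo"
  in
  CG.optimise g;
  (* graph_to_dot returns dot source that can be rendered with graphviz *)
  print_string (CG.graph_to_dot g)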

\ No newline at end of file diff --git a/owl-base/Owl_computation_graph/.dummy b/owl-base/Owl_computation_graph/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Linalg/index.html b/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Linalg/index.html deleted file mode 100644 index f8677c481..000000000 --- a/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Linalg/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Linalg (owl-base.Owl_computation_graph.Make.Optimiser.Operator.Linalg)

Module Operator.Linalg

\ No newline at end of file diff --git a/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Mat/index.html b/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Mat/index.html deleted file mode 100644 index 10ae156bb..000000000 --- a/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_graph.Make.Optimiser.Operator.Mat)

Module Operator.Mat

val eye : int -> Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Scalar/index.html b/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Scalar/index.html deleted file mode 100644 index b119f1a6b..000000000 --- a/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Scalar/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -Scalar (owl-base.Owl_computation_graph.Make.Optimiser.Operator.Scalar)

Module Operator.Scalar

\ No newline at end of file diff --git a/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html b/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html deleted file mode 100644 index 52f74544c..000000000 --- a/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_graph.Make.Optimiser.Operator.Symbol.Shape.Type.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html b/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html deleted file mode 100644 index 214c33683..000000000 --- a/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_graph.Make.Optimiser.Operator.Symbol.Shape.Type.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html b/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html deleted file mode 100644 index 7028787e6..000000000 --- a/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_graph.Make.Optimiser.Operator.Symbol.Shape.Type.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html b/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html deleted file mode 100644 index e2441ec6d..000000000 --- a/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_graph.Make.Optimiser.Operator.Symbol.Shape.Type.Device.A)

Module Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Symbol/Shape/Type/Device/index.html b/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Symbol/Shape/Type/Device/index.html deleted file mode 100644 index 9be7835d9..000000000 --- a/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Symbol/Shape/Type/Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_computation_graph.Make.Optimiser.Operator.Symbol.Shape.Type.Device)

Module Type.Device

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Symbol/Shape/Type/index.html b/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Symbol/Shape/Type/index.html deleted file mode 100644 index d336616eb..000000000 --- a/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Symbol/Shape/Type/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Type (owl-base.Owl_computation_graph.Make.Optimiser.Operator.Symbol.Shape.Type)

Module Shape.Type

Type definition
type state =
  1. | Valid
  2. | Invalid
    (*

    TODO

    *)

TODO

and block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}

The block type keeps a reference to a block of memory and to the nodes sharing that block (a short illustration follows the op type below).

and attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}

TODO

and arr =
  1. | Arr of t
and elt =
  1. | Elt of t
and op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
    (*

    TODO

    *)
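A small, hedged illustration of the types above. It assumes the constructors and record fields are brought into scope (for example with open Shape.Type), and the predicate is only written in the spirit of the is_random_variable function documented further down, not copied from its implementation.

(* summarise a memory block using the record fields documented above *)
let block_summary (b : block) =
  Printf.sprintf "block #%d: %d element(s), shared by %d node(s)"
    b.block_id b.size (List.length b.nodes)

(* a predicate in the spirit of Symbol.is_random_variable, by matching on op *)
let is_random_op : op -> bool = function
  | Uniform _ | Gaussian _ | Bernoulli _ -> true
  | _ -> false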
\ No newline at end of file diff --git a/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Symbol/Shape/index.html b/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Symbol/Shape/index.html deleted file mode 100644 index 650fda872..000000000 --- a/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Symbol/Shape/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Shape (owl-base.Owl_computation_graph.Make.Optimiser.Operator.Symbol.Shape)

Module Symbol.Shape

Core functions
val infer_shape : Type.op -> Type.attr Owl_graph.node array -> int array option array

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Symbol/index.html b/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Symbol/index.html deleted file mode 100644 index 0c937ac2b..000000000 --- a/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/Symbol/index.html +++ /dev/null @@ -1,28 +0,0 @@ - -Symbol (owl-base.Owl_computation_graph.Make.Optimiser.Operator.Symbol)

Module Operator.Symbol

Core functions
val op_to_str : Shape.Type.op -> string

TODO

val is_random_variable : Shape.Type.op -> bool

TODO

val refnum : 'a Owl_graph.node -> int

TODO

val node_shape : Shape.Type.attr Owl_graph.node -> int array

TODO

val node_numel : Shape.Type.attr Owl_graph.node -> int

TODO

val is_shape_unknown : Shape.Type.attr Owl_graph.node -> bool

TODO

val infer_shape_graph : Shape.Type.attr Owl_graph.node array -> unit

TODO

val shape_to_str : int array option array -> string

TODO

val node_to_str : Shape.Type.attr Owl_graph.node -> string

TODO

val node_to_arr : Shape.Type.t -> Shape.Type.arr

TODO

val arr_to_node : Shape.Type.arr -> Shape.Type.t

TODO

val node_to_elt : Shape.Type.t -> Shape.Type.elt

TODO

val elt_to_node : Shape.Type.elt -> Shape.Type.t

TODO

val make_node : ?name:string -> ?value:Shape.Type.Device.value array -> ?shape:int array option array -> ?freeze:bool -> ?reuse:bool -> ?state:Shape.Type.state -> Shape.Type.op -> Shape.Type.attr Owl_graph.node

TODO

val make_then_connect : ?shape:int array option array -> Shape.Type.op -> Shape.Type.attr Owl_graph.node array -> Shape.Type.attr Owl_graph.node

TODO

val var_arr : ?shape:int array -> string -> Shape.Type.arr

TODO

val var_elt : string -> Shape.Type.elt

TODO

val const_arr : string -> Shape.Type.Device.A.arr -> Shape.Type.arr

TODO

val const_elt : string -> Shape.Type.Device.A.elt -> Shape.Type.elt

TODO

val new_block_id : unit -> int

new_block_id () returns an unused block id.

val make_empty_block : ?block_id:int -> int -> Shape.Type.block

make_empty_block s returns an empty block of memory of size s.

val make_value_block : Shape.Type.Device.value -> Shape.Type.attr Owl_graph.node -> unit

make_value_block value node creates a block of memory initialised with value and links the new block to node.

get_block node returns the memory block allocated to node. If no block is allocated, an exception is raised.

val add_node_to_block : Shape.Type.attr Owl_graph.node -> Shape.Type.block -> unit

Links a node to a reusable block and initialises the node's memory on the memory of the block.

val get_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node option

Return the node that is currently using the memory of the block.

val set_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node -> unit

Update which node is currently using the memory of the block.

val get_block_id : Shape.Type.attr Owl_graph.node -> int

get_block_id node returns the id of the block assigned to node. If no block has been assigned yet, it returns -1.
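These are internal memory-planning helpers. The sketch below only shows how they compose, assuming the functions of this Symbol module are in scope; it is not a documented workflow.

let share_memory node_a node_b size =
  (* reserve a fresh id and an empty block of [size] elements *)
  let blk = make_empty_block ~block_id:(new_block_id ()) size in
  (* attach both nodes to the same block so they reuse one buffer *)
  add_node_to_block node_a blk;
  add_node_to_block node_b blk;
  (* both nodes now report the same block id *)
  assert (get_block_id node_a = get_block_id node_b)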

val set_value : Shape.Type.attr Owl_graph.node -> Shape.Type.Device.value array -> unit

TODO

val set_operator : Shape.Type.attr Owl_graph.node -> Shape.Type.op -> unit

TODO

TODO

val set_reuse : Shape.Type.attr Owl_graph.node -> bool -> unit

TODO

val get_reuse : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_shared : Shape.Type.attr Owl_graph.node -> bool

TODO

val get_shared_nodes : Shape.Type.attr Owl_graph.node -> Shape.Type.attr Owl_graph.node array

get_shared_nodes node returns the nodes sharing the same block of memory as node.

val is_var : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_const : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_arr : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_elt : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_assigned : Shape.Type.attr Owl_graph.node -> bool

is_assigned node checks if a block of memory has been assigned to node.

val check_assigned : Shape.Type.attr Owl_graph.node -> unit

check_assigned node throws an exception if node has not been assigned to a block.
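A small hedged helper combining the predicates above (same scoping assumption as the previous sketch):

let report node =
  if is_assigned node then
    Printf.printf "node shares its block with %d node(s) in total\n"
      (Array.length (get_shared_nodes node))
  else
    print_endline "node has no memory block assigned yet"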

val is_valid : Shape.Type.attr Owl_graph.node -> bool

TODO

val validate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate_graph : Shape.Type.attr Owl_graph.node -> unit

TODO

val is_freeze : Shape.Type.attr Owl_graph.node -> bool

TODO

val freeze : Shape.Type.attr Owl_graph.node -> unit

TODO

val freeze_descendants : Shape.Type.attr Owl_graph.node array -> unit

TODO

val freeze_ancestors : Shape.Type.attr Owl_graph.node array -> unit

TODO

TODO

TODO

val unsafe_assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_elt : Shape.Type.elt -> Shape.Type.Device.A.elt -> unit

TODO

val float_to_elt : float -> Shape.Type.elt

TODO

val elt_to_float : Shape.Type.elt -> float

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/index.html b/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/index.html deleted file mode 100644 index 548051b7b..000000000 --- a/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/Operator/index.html +++ /dev/null @@ -1,420 +0,0 @@ - -Operator (owl-base.Owl_computation_graph.Make.Optimiser.Operator)

Module Optimiser.Operator

Vectorised functions
val empty : int array -> Symbol.Shape.Type.arr

TODO

val zeros : int array -> Symbol.Shape.Type.arr

TODO

val ones : int array -> Symbol.Shape.Type.arr

TODO

val create : int array -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val sequential : ?a:Symbol.Shape.Type.elt -> ?step:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val uniform : ?a:Symbol.Shape.Type.elt -> ?b:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val gaussian : ?mu:Symbol.Shape.Type.elt -> ?sigma:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val bernoulli : ?p:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val init : int array -> (int -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr

TODO

val init_nd : int array -> (int array -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr

TODO

val shape : Symbol.Shape.Type.arr -> int array

TODO

val numel : Symbol.Shape.Type.arr -> int

TODO

TODO

val set : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.elt -> unit

TODO

val get_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val set_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit

TODO

TODO

val set_fancy : Owl_types.index list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit

TODO

val copy_ : out:'a -> 'b -> 'c

TODO

val reset : Symbol.Shape.Type.arr -> unit

TODO

val reshape : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val tile : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val repeat : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val pad : ?v:Symbol.Shape.Type.elt -> int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val expand : ?hi:bool -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr

TODO

val squeeze : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val concatenate : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr

TODO

val stack : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr

TODO

TODO

val split : ?axis:int -> 'a -> 'b -> 'c

TODO

val draw : ?axis:int -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr * 'a array

TODO

TODO

delay f x returns f x. It makes it possible to use a function f that is not tracked by the computation graph and to delay its evaluation. The output must have the same shape as the input (see the sketch after lazy_print below).

delay_array out_shape f x works in the same way as delay, but f is applied to an array of ndarrays. The shape of the output has to be passed explicitly because it cannot be inferred.

val lazy_print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(Symbol.Shape.Type.Device.A.elt -> string) -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

lazy_print x prints the value of x when the node is evaluated. It is implemented as an identity node, so it can be inserted anywhere in a graph without changing the result. For information about the optional parameters, refer to the print function of the Ndarray module.
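A hedged sketch combining delay and lazy_print. The engine instantiation and eval_arr come from the full owl package and are assumptions rather than part of this signature.

module CG = Owl_computation_cpu_engine.Make (Owl_algodiff_primal_ops.S)
module A = Owl_algodiff_primal_ops.S      (* the underlying ndarray module *)

let () =
  let x = CG.var_arr "x" ~shape:[| 4 |] in
  (* delay wraps an untracked ndarray function as a single graph node;
     the result keeps the shape of the input *)
  let y = CG.delay (fun a -> A.mul_scalar a 2.) x in
  (* identity node whose evaluation prints the intermediate value *)
  let z = CG.lazy_print ~header:true y in
  CG.assign_arr x (A.sequential [| 4 |]);
  CG.eval_arr [| z |]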

val print : ?max_row:'a -> ?max_col:'b -> ?header:'c -> ?fmt:'d -> 'e -> unit

TODO

val min : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val sum : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val sum_reduce : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val log_sum_exp : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

TODO

val elt_less_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val elt_greater_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val upsampling2d : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val upsampling2d_backward : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val row_num : Symbol.Shape.Type.arr -> int

TODO

val col_num : Symbol.Shape.Type.arr -> int

TODO

val rows : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val copy_row_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

val copy_col_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

TODO

val transpose : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val to_rows : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val to_cols : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val of_array : Symbol.Shape.Type.elt array -> int array -> Symbol.Shape.Type.arr

TODO

val of_arrays : Symbol.Shape.Type.elt array array -> Symbol.Shape.Type.arr

TODO

val to_arrays : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt array array

TODO

Scalar functions
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/index.html b/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/index.html deleted file mode 100644 index b6a59250b..000000000 --- a/owl-base/Owl_computation_graph/Make/argument-1-Optimiser/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Optimiser (owl-base.Owl_computation_graph.Make.Optimiser)

Parameter Make.Optimiser

Core functions
val estimate_complexity : 'a Owl_graph.node array -> int * int

TODO

val optimise_nodes : Operator.Symbol.Shape.Type.attr Owl_graph.node array -> unit

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_graph/Make/index.html b/owl-base/Owl_computation_graph/Make/index.html deleted file mode 100644 index ccf703256..000000000 --- a/owl-base/Owl_computation_graph/Make/index.html +++ /dev/null @@ -1,37 +0,0 @@ - -Make (owl-base.Owl_computation_graph.Make)

Module Owl_computation_graph.Make

Parameters

Signature

module Optimiser = Optimiser
val shape_or_value : Optimiser.Operator.Symbol.Shape.Type.t -> string
val _block_colour : int -> string
val graph_to_dot : graph -> string
val graph_to_trace : graph -> string
val save_graph : 'a -> string -> unit
val load_graph : string -> 'a * 'b
val invalidate_rvs : graph -> unit
val is_iopair_safe : 'a Owl_graph.node -> 'b Owl_graph.node -> bool
val update_iopair : graph -> unit
val remove_unused_iopair : 'a Owl_graph.node array -> 'b array -> 'a Owl_graph.node array * 'b array
val optimise : graph -> unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_graph_sig/.dummy b/owl-base/Owl_computation_graph_sig/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Linalg/index.html b/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Linalg/index.html deleted file mode 100644 index 1b992101e..000000000 --- a/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Linalg/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Linalg (owl-base.Owl_computation_graph_sig.Sig.Optimiser.Operator.Linalg)

Module Operator.Linalg

\ No newline at end of file diff --git a/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Mat/index.html b/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Mat/index.html deleted file mode 100644 index f9ed8ce2c..000000000 --- a/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_graph_sig.Sig.Optimiser.Operator.Mat)

Module Operator.Mat

val eye : int -> Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Scalar/index.html b/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Scalar/index.html deleted file mode 100644 index bc3402ff3..000000000 --- a/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Scalar/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -Scalar (owl-base.Owl_computation_graph_sig.Sig.Optimiser.Operator.Scalar)

Module Operator.Scalar

\ No newline at end of file diff --git a/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html b/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html deleted file mode 100644 index 418a46ce5..000000000 --- a/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_graph_sig.Sig.Optimiser.Operator.Symbol.Shape.Type.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html b/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html deleted file mode 100644 index 2def90b15..000000000 --- a/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_graph_sig.Sig.Optimiser.Operator.Symbol.Shape.Type.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html b/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html deleted file mode 100644 index 2f1d83ac2..000000000 --- a/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_graph_sig.Sig.Optimiser.Operator.Symbol.Shape.Type.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html b/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html deleted file mode 100644 index 118949be5..000000000 --- a/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_graph_sig.Sig.Optimiser.Operator.Symbol.Shape.Type.Device.A)

Module Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Symbol/Shape/Type/Device/index.html b/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Symbol/Shape/Type/Device/index.html deleted file mode 100644 index 2148ae157..000000000 --- a/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Symbol/Shape/Type/Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_computation_graph_sig.Sig.Optimiser.Operator.Symbol.Shape.Type.Device)

Module Type.Device

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Symbol/Shape/Type/index.html b/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Symbol/Shape/Type/index.html deleted file mode 100644 index 0fd866ffe..000000000 --- a/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Symbol/Shape/Type/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Type (owl-base.Owl_computation_graph_sig.Sig.Optimiser.Operator.Symbol.Shape.Type)

Module Shape.Type

Type definition
type state =
  1. | Valid
  2. | Invalid
    (*

    TODO

    *)

TODO

and block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}

The block type keeps a reference to a block of memory and to the nodes sharing that block.
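
As a rough illustration of how these fields relate, a hypothetical helper (a sketch only, not part of the interface above) could summarise a block by its id, its size and the number of nodes currently sharing it:

(* sketch: assumes an existing value [b] of the block type defined above *)
let describe_block (b : block) =
  Printf.sprintf "block %d: %d elements, shared by %d node(s)"
    b.block_id b.size (List.length b.nodes)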

and attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}

TODO

and arr =
  1. | Arr of t
and elt =
  1. | Elt of t
and op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
    (*

    TODO

    *)
\ No newline at end of file diff --git a/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Symbol/Shape/index.html b/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Symbol/Shape/index.html deleted file mode 100644 index 1f752137f..000000000 --- a/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Symbol/Shape/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Shape (owl-base.Owl_computation_graph_sig.Sig.Optimiser.Operator.Symbol.Shape)

Module Symbol.Shape

Core functions
val infer_shape : Type.op -> Type.attr Owl_graph.node array -> int array option array

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Symbol/index.html b/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Symbol/index.html deleted file mode 100644 index 1268d42f4..000000000 --- a/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/Symbol/index.html +++ /dev/null @@ -1,28 +0,0 @@ - -Symbol (owl-base.Owl_computation_graph_sig.Sig.Optimiser.Operator.Symbol)

Module Operator.Symbol

Core functions
val op_to_str : Shape.Type.op -> string

TODO

val is_random_variable : Shape.Type.op -> bool

TODO

val refnum : 'a Owl_graph.node -> int

TODO

val node_shape : Shape.Type.attr Owl_graph.node -> int array

TODO

val node_numel : Shape.Type.attr Owl_graph.node -> int

TODO

val is_shape_unknown : Shape.Type.attr Owl_graph.node -> bool

TODO

val infer_shape_graph : Shape.Type.attr Owl_graph.node array -> unit

TODO

val shape_to_str : int array option array -> string

TODO

val node_to_str : Shape.Type.attr Owl_graph.node -> string

TODO

val node_to_arr : Shape.Type.t -> Shape.Type.arr

TODO

val arr_to_node : Shape.Type.arr -> Shape.Type.t

TODO

val node_to_elt : Shape.Type.t -> Shape.Type.elt

TODO

val elt_to_node : Shape.Type.elt -> Shape.Type.t

TODO

val make_node : ?name:string -> ?value:Shape.Type.Device.value array -> ?shape:int array option array -> ?freeze:bool -> ?reuse:bool -> ?state:Shape.Type.state -> Shape.Type.op -> Shape.Type.attr Owl_graph.node

TODO

val make_then_connect : ?shape:int array option array -> Shape.Type.op -> Shape.Type.attr Owl_graph.node array -> Shape.Type.attr Owl_graph.node

TODO

val var_arr : ?shape:int array -> string -> Shape.Type.arr

TODO

val var_elt : string -> Shape.Type.elt

TODO

val const_arr : string -> Shape.Type.Device.A.arr -> Shape.Type.arr

TODO

val const_elt : string -> Shape.Type.Device.A.elt -> Shape.Type.elt

TODO

val new_block_id : unit -> int

new_block_id () returns an unused block id.

val make_empty_block : ?block_id:int -> int -> Shape.Type.block

make_empty_block s returns an empty block of memory of size s.

val make_value_block : Shape.Type.Device.value -> Shape.Type.attr Owl_graph.node -> unit

make_value_block value node creates a block of memory initialised with value and links the new block to node.

get_block node returns the memory block allocated to node. If no block is allocated, it raises an exception.

val add_node_to_block : Shape.Type.attr Owl_graph.node -> Shape.Type.block -> unit

Links a node to a reusable block and initialises the node's memory on the memory of the block.

val get_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node option

Return the node that is currently using the memory of the block.

val set_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node -> unit

Update the node that is currently using the block of memory.

val get_block_id : Shape.Type.attr Owl_graph.node -> int

get_block_id node returns the id of the block assigned to node. If node has not been assigned yet, returns -1.
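
Taken together, the functions above suggest the following allocation workflow. This is only a hedged sketch: the node x is assumed to exist already (for example a var_arr node), and numel is assumed to be its element count.

let assign_fresh_block x numel =
  (* reserve an empty block of [numel] elements under an unused id ... *)
  let blk = make_empty_block ~block_id:(new_block_id ()) numel in
  (* ... attach [x] to it so the node's memory lives inside the block *)
  add_node_to_block x blk;
  (* the node is now assigned; return the id of its block *)
  assert (is_assigned x);
  get_block_id x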

val set_value : Shape.Type.attr Owl_graph.node -> Shape.Type.Device.value array -> unit

TODO

val set_operator : Shape.Type.attr Owl_graph.node -> Shape.Type.op -> unit

TODO

TODO

val set_reuse : Shape.Type.attr Owl_graph.node -> bool -> unit

TODO

val get_reuse : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_shared : Shape.Type.attr Owl_graph.node -> bool

TODO

val get_shared_nodes : Shape.Type.attr Owl_graph.node -> Shape.Type.attr Owl_graph.node array

get_shared_nodes node returns the nodes sharing the same block of memory as node.
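
For example, a hedged one-liner (node is assumed to exist) that counts how many nodes currently reuse the same memory:

(* sketch: number of nodes sharing the block assigned to [node] *)
let n_sharing node = Array.length (get_shared_nodes node)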

val is_var : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_const : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_arr : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_elt : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_assigned : Shape.Type.attr Owl_graph.node -> bool

is_assigned node checks if a block of memory has been assigned to node.

val check_assigned : Shape.Type.attr Owl_graph.node -> unit

check_assigned node throws an exception if node has not been assigned to a block.

val is_valid : Shape.Type.attr Owl_graph.node -> bool

TODO

val validate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate_graph : Shape.Type.attr Owl_graph.node -> unit

TODO

val is_freeze : Shape.Type.attr Owl_graph.node -> bool

TODO

val freeze : Shape.Type.attr Owl_graph.node -> unit

TODO

val freeze_descendants : Shape.Type.attr Owl_graph.node array -> unit

TODO

val freeze_ancestors : Shape.Type.attr Owl_graph.node array -> unit

TODO

TODO

TODO

val unsafe_assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_elt : Shape.Type.elt -> Shape.Type.Device.A.elt -> unit

TODO

val float_to_elt : float -> Shape.Type.elt

TODO

val elt_to_float : Shape.Type.elt -> float

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/index.html b/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/index.html deleted file mode 100644 index 6765b8b0a..000000000 --- a/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/Operator/index.html +++ /dev/null @@ -1,420 +0,0 @@ - -Operator (owl-base.Owl_computation_graph_sig.Sig.Optimiser.Operator)

Module Optimiser.Operator

Vectorised functions
val empty : int array -> Symbol.Shape.Type.arr

TODO

val zeros : int array -> Symbol.Shape.Type.arr

TODO

val ones : int array -> Symbol.Shape.Type.arr

TODO

val create : int array -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val sequential : ?a:Symbol.Shape.Type.elt -> ?step:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val uniform : ?a:Symbol.Shape.Type.elt -> ?b:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val gaussian : ?mu:Symbol.Shape.Type.elt -> ?sigma:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val bernoulli : ?p:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val init : int array -> (int -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr

TODO

val init_nd : int array -> (int array -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr

TODO

val shape : Symbol.Shape.Type.arr -> int array

TODO

val numel : Symbol.Shape.Type.arr -> int

TODO

TODO

val set : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.elt -> unit

TODO

val get_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val set_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit

TODO

TODO

val set_fancy : Owl_types.index list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit

TODO

val copy_ : out:'a -> 'b -> 'c

TODO

val reset : Symbol.Shape.Type.arr -> unit

TODO

val reshape : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val tile : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val repeat : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val pad : ?v:Symbol.Shape.Type.elt -> int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val expand : ?hi:bool -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr

TODO

val squeeze : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val concatenate : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr

TODO

val stack : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr

TODO

TODO

val split : ?axis:int -> 'a -> 'b -> 'c

TODO

val draw : ?axis:int -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr * 'a array

TODO

TODO

delay f x returns f x. It makes it possible to use a function that is not tracked by the computation graph and to delay its evaluation. The output must have the same shape as the input.

delay_array out_shape f x works in the same way as delay but is applied to an array of ndarrays. It needs the shape of the output as an argument.
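
The val lines for delay and delay_array are elided above, so the following is only a hedged usage sketch: x is assumed to be an existing Symbol.Shape.Type.arr, and delay is assumed to take the untracked ndarray function first, as the description suggests.

let y =
  (* double every element with a raw ndarray function that the graph does not track;
     the result keeps the shape of the input, as required above *)
  delay
    (fun a -> Symbol.Shape.Type.Device.A.(mul_scalar a (float_to_elt 2.)))
    x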

val lazy_print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(Symbol.Shape.Type.Device.A.elt -> string) -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

lazy_print x prints the output of x when it is evaluated. It is implemented as an identity node. For information about the optional parameters, refer to the print function of the Ndarray module.
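
A short usage sketch based on the signature above, assuming x is an existing Symbol.Shape.Type.arr in the graph:

(* insert an identity node that prints the intermediate result when the graph is evaluated *)
let x_logged = lazy_print ~max_row:10 ~max_col:8 ~header:true x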

val print : ?max_row:'a -> ?max_col:'b -> ?header:'c -> ?fmt:'d -> 'e -> unit

TODO

val min : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val sum : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val sum_reduce : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val log_sum_exp : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

TODO

val elt_less_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val elt_greater_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val upsampling2d : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val upsampling2d_backward : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val row_num : Symbol.Shape.Type.arr -> int

TODO

val col_num : Symbol.Shape.Type.arr -> int

TODO

val rows : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val copy_row_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

val copy_col_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

TODO

val transpose : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val to_rows : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val to_cols : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val of_array : Symbol.Shape.Type.elt array -> int array -> Symbol.Shape.Type.arr

TODO

val of_arrays : Symbol.Shape.Type.elt array array -> Symbol.Shape.Type.arr

TODO

val to_arrays : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt array array

TODO

Scalar functions
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/index.html b/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/index.html deleted file mode 100644 index 45da71231..000000000 --- a/owl-base/Owl_computation_graph_sig/module-type-Sig/Optimiser/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Optimiser (owl-base.Owl_computation_graph_sig.Sig.Optimiser)

Module Sig.Optimiser

Core functions
val estimate_complexity : 'a Owl_graph.node array -> int * int

TODO

val optimise_nodes : Operator.Symbol.Shape.Type.attr Owl_graph.node array -> unit

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_graph_sig/module-type-Sig/index.html b/owl-base/Owl_computation_graph_sig/module-type-Sig/index.html deleted file mode 100644 index cd78019cd..000000000 --- a/owl-base/Owl_computation_graph_sig/module-type-Sig/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Sig (owl-base.Owl_computation_graph_sig.Sig)

Module type Owl_computation_graph_sig.Sig

Type definition
type graph

TODO

Core functions
val shape_or_value : Optimiser.Operator.Symbol.Shape.Type.t -> string

TODO

val graph_to_dot : graph -> string

TODO

val graph_to_trace : graph -> string

TODO

val save_graph : 'a -> string -> unit

TODO

val load_graph : string -> 'a * 'b

TODO

val invalidate_rvs : graph -> unit

TODO

val is_iopair_safe : 'a Owl_graph.node -> 'a Owl_graph.node -> bool

TODO

val update_iopair : graph -> unit

TODO

val remove_unused_iopair : 'a Owl_graph.node array -> 'b array -> 'a Owl_graph.node array * 'b array

TODO

val optimise : graph -> unit

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_operator/.dummy b/owl-base/Owl_computation_operator/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_computation_operator/Make/Linalg/index.html b/owl-base/Owl_computation_operator/Make/Linalg/index.html deleted file mode 100644 index 1f2c80830..000000000 --- a/owl-base/Owl_computation_operator/Make/Linalg/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Linalg (owl-base.Owl_computation_operator.Make.Linalg)

Module Make.Linalg

val logdet : 'a -> 'b
val chol : ?upper:bool -> 'a -> 'b
val svd : ?thin:bool -> 'a -> 'b
val qr : 'a -> 'b
val lq : 'a -> 'b
val sylvester : 'a -> 'b -> 'c -> 'd
val lyapunov : 'a -> 'b -> 'c
val discrete_lyapunov : ?solver:[> `default ] -> 'a -> 'b -> 'c
val linsolve : ?trans:'a -> ?typ:[> `n ] -> 'b -> 'c -> 'd
val care : ?diag_r:bool -> 'a -> 'b -> 'c -> 'd -> 'e
val dare : ?diag_r:bool -> 'a -> 'b -> 'c -> 'd -> 'e
\ No newline at end of file diff --git a/owl-base/Owl_computation_operator/Make/Mat/index.html b/owl-base/Owl_computation_operator/Make/Mat/index.html deleted file mode 100644 index 282126d2c..000000000 --- a/owl-base/Owl_computation_operator/Make/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_operator.Make.Mat)

Module Make.Mat

val eye : 'a -> 'b
val diagm : ?k:'a -> 'b -> 'c
val tril : ?k:'a -> 'b -> 'c
val triu : ?k:'a -> 'b -> 'c
\ No newline at end of file diff --git a/owl-base/Owl_computation_operator/Make/Scalar/index.html b/owl-base/Owl_computation_operator/Make/Scalar/index.html deleted file mode 100644 index 26dc08394..000000000 --- a/owl-base/Owl_computation_operator/Make/Scalar/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -Scalar (owl-base.Owl_computation_operator.Make.Scalar)

Module Make.Scalar

\ No newline at end of file diff --git a/owl-base/Owl_computation_operator/Make/argument-1-Symbol/Shape/Type/Device/A/Linalg/index.html b/owl-base/Owl_computation_operator/Make/argument-1-Symbol/Shape/Type/Device/A/Linalg/index.html deleted file mode 100644 index 5ea60b3b9..000000000 --- a/owl-base/Owl_computation_operator/Make/argument-1-Symbol/Shape/Type/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_operator.Make.Symbol.Shape.Type.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_operator/Make/argument-1-Symbol/Shape/Type/Device/A/Mat/index.html b/owl-base/Owl_computation_operator/Make/argument-1-Symbol/Shape/Type/Device/A/Mat/index.html deleted file mode 100644 index 39018f7a9..000000000 --- a/owl-base/Owl_computation_operator/Make/argument-1-Symbol/Shape/Type/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_operator.Make.Symbol.Shape.Type.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_operator/Make/argument-1-Symbol/Shape/Type/Device/A/Scalar/index.html b/owl-base/Owl_computation_operator/Make/argument-1-Symbol/Shape/Type/Device/A/Scalar/index.html deleted file mode 100644 index e66c4f3d2..000000000 --- a/owl-base/Owl_computation_operator/Make/argument-1-Symbol/Shape/Type/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_operator.Make.Symbol.Shape.Type.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_computation_operator/Make/argument-1-Symbol/Shape/Type/Device/A/index.html b/owl-base/Owl_computation_operator/Make/argument-1-Symbol/Shape/Type/Device/A/index.html deleted file mode 100644 index 572512bd3..000000000 --- a/owl-base/Owl_computation_operator/Make/argument-1-Symbol/Shape/Type/Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_operator.Make.Symbol.Shape.Type.Device.A)

Module Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
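The trailing-underscore functions above follow an out-parameter convention: they write their result into ?out (or the mandatory out) instead of allocating a fresh array. A sketch of that idiom, written against a narrowed module type so only entries listed above are assumed:

module type ND = sig
  type arr
  type elt
  val zeros : int array -> arr
  val uniform : ?a:elt -> ?b:elt -> int array -> arr
  val add_ : ?out:arr -> arr -> arr -> unit
  val sum' : arr -> elt
end

module Accumulate (A : ND) = struct
  (* Sum n random samples into one preallocated buffer, avoiding intermediate allocations. *)
  let run ~shape ~n =
    let acc = A.zeros shape in
    for _ = 1 to n do
      A.add_ ~out:acc acc (A.uniform shape)
    done;
    A.sum' acc
end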
\ No newline at end of file diff --git a/owl-base/Owl_computation_operator/Make/argument-1-Symbol/Shape/Type/Device/index.html b/owl-base/Owl_computation_operator/Make/argument-1-Symbol/Shape/Type/Device/index.html deleted file mode 100644 index 5a2c8bd20..000000000 --- a/owl-base/Owl_computation_operator/Make/argument-1-Symbol/Shape/Type/Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_computation_operator.Make.Symbol.Shape.Type.Device)

Module Type.Device

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO
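A sketch of the intended round trip between raw ndarrays and device values, assuming a module matching the signature above:

module type DEVICE = sig
  type device
  type value
  module A : sig type arr end
  val make_device : unit -> device
  val arr_to_value : A.arr -> value
  val value_to_arr : value -> A.arr
  val is_arr : value -> bool
end

module Roundtrip (D : DEVICE) = struct
  (* Wrap an ndarray as a device value, check its tag, then unwrap it again. *)
  let run (x : D.A.arr) =
    let _dev = D.make_device () in
    let v = D.arr_to_value x in
    assert (D.is_arr v);
    D.value_to_arr v
end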

\ No newline at end of file diff --git a/owl-base/Owl_computation_operator/Make/argument-1-Symbol/Shape/Type/index.html b/owl-base/Owl_computation_operator/Make/argument-1-Symbol/Shape/Type/index.html deleted file mode 100644 index 8f2f26f46..000000000 --- a/owl-base/Owl_computation_operator/Make/argument-1-Symbol/Shape/Type/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Type (owl-base.Owl_computation_operator.Make.Symbol.Shape.Type)

Module Shape.Type

Type definition
type state =
  1. | Valid
  2. | Invalid
    (*

    TODO

    *)

TODO

and block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}

The block type keeps a reference to a block of memory and to the nodes sharing that block.

and attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}

TODO

and arr =
  1. | Arr of t
and elt =
  1. | Elt of t
and op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
    (*

    TODO

    *)
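Since op is a large flat variant, graph passes typically classify constructors with a catch-all wildcard rather than matching exhaustively. A sketch (assumes this Type module is opened so the constructors are in scope):

(* True for a few purely element-wise unary operators; everything else falls through. *)
let is_pointwise_unary = function
  | Abs | Neg | Floor | Ceil | Round
  | Sqr | Sqrt | Exp | Log | Sin | Cos | Tanh
  | Sigmoid | Relu -> true
  | _ -> false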
\ No newline at end of file diff --git a/owl-base/Owl_computation_operator/Make/argument-1-Symbol/Shape/index.html b/owl-base/Owl_computation_operator/Make/argument-1-Symbol/Shape/index.html deleted file mode 100644 index da199a4c6..000000000 --- a/owl-base/Owl_computation_operator/Make/argument-1-Symbol/Shape/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Shape (owl-base.Owl_computation_operator.Make.Symbol.Shape)

Module Symbol.Shape

Core functions
val infer_shape : Type.op -> Type.attr Owl_graph.node array -> int array option array

TODO
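A sketch of driving infer_shape by hand and printing the result, assuming this Shape module and Owl_graph are in scope:

(* Print one line per output slot: either a concrete shape or "unknown". *)
let infer_and_show op parents =
  Shape.infer_shape op parents
  |> Array.iter (function
       | Some s ->
         s |> Array.map string_of_int |> Array.to_list
           |> String.concat "," |> Printf.printf "[%s]\n"
       | None -> print_endline "unknown")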

\ No newline at end of file diff --git a/owl-base/Owl_computation_operator/Make/argument-1-Symbol/index.html b/owl-base/Owl_computation_operator/Make/argument-1-Symbol/index.html deleted file mode 100644 index df4df4cb0..000000000 --- a/owl-base/Owl_computation_operator/Make/argument-1-Symbol/index.html +++ /dev/null @@ -1,28 +0,0 @@ - -Symbol (owl-base.Owl_computation_operator.Make.Symbol)

Parameter Make.Symbol

Core functions
val op_to_str : Shape.Type.op -> string

TODO

val is_random_variable : Shape.Type.op -> bool

TODO

val refnum : 'a Owl_graph.node -> int

TODO

val node_shape : Shape.Type.attr Owl_graph.node -> int array

TODO

val node_numel : Shape.Type.attr Owl_graph.node -> int

TODO

val is_shape_unknown : Shape.Type.attr Owl_graph.node -> bool

TODO

val infer_shape_graph : Shape.Type.attr Owl_graph.node array -> unit

TODO

val shape_to_str : int array option array -> string

TODO

val node_to_str : Shape.Type.attr Owl_graph.node -> string

TODO

val node_to_arr : Shape.Type.t -> Shape.Type.arr

TODO

val arr_to_node : Shape.Type.arr -> Shape.Type.t

TODO

val node_to_elt : Shape.Type.t -> Shape.Type.elt

TODO

val elt_to_node : Shape.Type.elt -> Shape.Type.t

TODO

val make_node : ?name:string -> ?value:Shape.Type.Device.value array -> ?shape:int array option array -> ?freeze:bool -> ?reuse:bool -> ?state:Shape.Type.state -> Shape.Type.op -> Shape.Type.attr Owl_graph.node

TODO

val make_then_connect : ?shape:int array option array -> Shape.Type.op -> Shape.Type.attr Owl_graph.node array -> Shape.Type.attr Owl_graph.node

TODO

val var_arr : ?shape:int array -> string -> Shape.Type.arr

TODO

val var_elt : string -> Shape.Type.elt

TODO

val const_arr : string -> Shape.Type.Device.A.arr -> Shape.Type.arr

TODO

val const_elt : string -> Shape.Type.Device.A.elt -> Shape.Type.elt

TODO
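A sketch of declaring graph inputs with these constructors (assumes this Symbol module is opened; the shape literal and names are arbitrary):

(* One symbolic placeholder plus one constant built from a concrete ndarray c. *)
let make_inputs c =
  let x = var_arr ~shape:[| 3; 3 |] "x" in
  let w = const_arr "w" c in
  (x, w)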

val new_block_id : unit -> int

new_block_id () returns an unused block id.

val make_empty_block : ?block_id:int -> int -> Shape.Type.block

make_empty_block s returns an empty block of memory of size s.

val make_value_block : Shape.Type.Device.value -> Shape.Type.attr Owl_graph.node -> unit

make_value_block value node creates a block of memory initialised with value and links the new block to node.

get_block node returns the memory block allocated to node. If no block is allocated, throws an exception.

val add_node_to_block : Shape.Type.attr Owl_graph.node -> Shape.Type.block -> unit

Links a node to a reusable block and initialises the node's memory on top of the block's memory.

val get_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node option

Return the node that is currently using the memory of the block.

val set_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node -> unit

Update the node that is currently using the block of memory.

val get_block_id : Shape.Type.attr Owl_graph.node -> int

get_block_id node returns the id of the block assigned to node. If node has not been assigned yet, returns -1.
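The block-management functions above work together roughly as follows (a sketch, assuming this module is opened; node is any attr node):

(* Allocate a fresh block of `size` elements and hand it to `node`. *)
let attach node size =
  let blk = make_empty_block ~block_id:(new_block_id ()) size in
  add_node_to_block node blk;
  assert (is_assigned node);
  get_block_id node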

val set_value : Shape.Type.attr Owl_graph.node -> Shape.Type.Device.value array -> unit

TODO

val set_operator : Shape.Type.attr Owl_graph.node -> Shape.Type.op -> unit

TODO

TODO

val set_reuse : Shape.Type.attr Owl_graph.node -> bool -> unit

TODO

val get_reuse : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_shared : Shape.Type.attr Owl_graph.node -> bool

TODO

val get_shared_nodes : Shape.Type.attr Owl_graph.node -> Shape.Type.attr Owl_graph.node array

get_shared_nodes node returns the nodes sharing the same block of memory as node.

val is_var : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_const : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_arr : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_elt : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_assigned : Shape.Type.attr Owl_graph.node -> bool

is_assigned node checks if a block of memory has been assigned to node.

val check_assigned : Shape.Type.attr Owl_graph.node -> unit

check_assigned node throws an exception if node has not been assigned to a block.

val is_valid : Shape.Type.attr Owl_graph.node -> bool

TODO

val validate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate_graph : Shape.Type.attr Owl_graph.node -> unit

TODO

val is_freeze : Shape.Type.attr Owl_graph.node -> bool

TODO

val freeze : Shape.Type.attr Owl_graph.node -> unit

TODO

val freeze_descendants : Shape.Type.attr Owl_graph.node array -> unit

TODO

val freeze_ancestors : Shape.Type.attr Owl_graph.node array -> unit

TODO

TODO

TODO

val unsafe_assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_elt : Shape.Type.elt -> Shape.Type.Device.A.elt -> unit

TODO

val float_to_elt : float -> Shape.Type.elt

TODO

val elt_to_float : Shape.Type.elt -> float

TODO
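A sketch of feeding concrete data into symbolic placeholders before a graph is evaluated (assumes this module is opened; x and alpha are nodes created earlier with var_arr and var_elt):

let bind_inputs x alpha data a =
  assign_arr x data;                 (* data : Shape.Type.Device.A.arr *)
  assign_elt alpha (float_to_elt a)  (* a : float *)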

\ No newline at end of file diff --git a/owl-base/Owl_computation_operator/Make/index.html b/owl-base/Owl_computation_operator/Make/index.html deleted file mode 100644 index 132740c21..000000000 --- a/owl-base/Owl_computation_operator/Make/index.html +++ /dev/null @@ -1,413 +0,0 @@ - -Make (owl-base.Owl_computation_operator.Make)

Module Owl_computation_operator.Make

Parameters

Signature

module Symbol = Symbol
val empty : int array -> Symbol.Shape.Type.arr
val zeros : int array -> Symbol.Shape.Type.arr
val ones : int array -> Symbol.Shape.Type.arr
val create : int array -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr
val sequential : ?a:Symbol.Shape.Type.elt -> ?step:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val uniform : ?a:Symbol.Shape.Type.elt -> ?b:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val gaussian : ?mu:Symbol.Shape.Type.elt -> ?sigma:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val bernoulli : ?p:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val init : int array -> (int -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr
val init_nd : 'a -> 'b -> 'c
val shape : Symbol.Shape.Type.arr -> int array
val numel : Symbol.Shape.Type.arr -> int
val set : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.elt -> unit
val get_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val set_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit
val set_fancy : Owl_types_common.index list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit
val copy_ : out:'a -> 'b -> 'c
val reset : Symbol.Shape.Type.arr -> unit
val reshape : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val tile : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val repeat : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val pad : ?v:Symbol.Shape.Type.elt -> int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val expand : ?hi:bool -> 'a -> 'b -> 'c
val squeeze : ?axis:'a array -> 'b -> 'c
val concatenate : - ?axis:int -> - Symbol.Shape.Type.arr array -> - Symbol.Shape.Type.arr
val stack : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr
val concat : axis:'a -> 'b
val split : ?axis:int -> 'a -> 'b -> 'c
val draw : ?axis:int -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr * 'a array
val lazy_print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(Symbol.Shape.Type.Device.A.elt -> string) -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val print : ?max_row:'a -> ?max_col:'b -> ?header:'c -> ?fmt:'d -> 'e -> unit
val min : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val sum : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val sum_reduce : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val elt_less_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr
val elt_greater_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr
val conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val transpose_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val transpose_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val transpose_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val dilated_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val dilated_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val dilated_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val max_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val max_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val max_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val avg_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val avg_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val avg_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val upsampling2d : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max_pool1d_backward : Owl_types_common.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max_pool2d_backward : Owl_types_common.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max_pool3d_backward : Owl_types_common.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val avg_pool1d_backward : Owl_types_common.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val avg_pool2d_backward : Owl_types_common.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val avg_pool3d_backward : Owl_types_common.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val upsampling2d_backward : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val row_num : Symbol.Shape.Type.arr -> int
val col_num : Symbol.Shape.Type.arr -> int
val rows : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val copy_row_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit
val copy_col_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit
val diag : ?k:'a -> 'b -> 'c
val transpose : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val to_rows : Symbol.Shape.Type.arr -> 'a array
val of_array : Symbol.Shape.Type.elt array -> int array -> Symbol.Shape.Type.arr
val of_cols : 'a -> 'b
val to_cols : 'a -> 'b
val of_arrays : Symbol.Shape.Type.elt array array -> Symbol.Shape.Type.arr
val to_arrays : 'a -> 'b

Scalar maths

module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
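A sketch of building a small lazy expression with the generated operators. It assumes G is an instantiation of this functor (for example, the hypothetical module G = Owl_computation_operator.Make (Symbol)); every call only records a node in the graph, nothing is evaluated yet.

let graph () =
  let x = G.uniform [| 4; 4 |] in
  let y = G.ones [| 4; 4 |] in
  (* pad x to 6x4, then reduce both operands along axis 0 and concatenate the results *)
  let z = G.sum ~axis:0 (G.pad [ [ 1; 1 ]; [ 0; 0 ] ] x) in
  G.concatenate ~axis:0 [| z; G.sum ~axis:0 y |]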
\ No newline at end of file diff --git a/owl-base/Owl_computation_operator_sig/.dummy b/owl-base/Owl_computation_operator_sig/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_computation_operator_sig/module-type-Sig/Linalg/index.html b/owl-base/Owl_computation_operator_sig/module-type-Sig/Linalg/index.html deleted file mode 100644 index 020adace6..000000000 --- a/owl-base/Owl_computation_operator_sig/module-type-Sig/Linalg/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Linalg (owl-base.Owl_computation_operator_sig.Sig.Linalg)

Module Sig.Linalg

\ No newline at end of file diff --git a/owl-base/Owl_computation_operator_sig/module-type-Sig/Mat/index.html b/owl-base/Owl_computation_operator_sig/module-type-Sig/Mat/index.html deleted file mode 100644 index d3debbc31..000000000 --- a/owl-base/Owl_computation_operator_sig/module-type-Sig/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_operator_sig.Sig.Mat)

Module Sig.Mat

val eye : int -> Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_operator_sig/module-type-Sig/Scalar/index.html b/owl-base/Owl_computation_operator_sig/module-type-Sig/Scalar/index.html deleted file mode 100644 index dfe954164..000000000 --- a/owl-base/Owl_computation_operator_sig/module-type-Sig/Scalar/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -Scalar (owl-base.Owl_computation_operator_sig.Sig.Scalar)

Module Sig.Scalar

\ No newline at end of file diff --git a/owl-base/Owl_computation_operator_sig/module-type-Sig/Symbol/Shape/Type/Device/A/Linalg/index.html b/owl-base/Owl_computation_operator_sig/module-type-Sig/Symbol/Shape/Type/Device/A/Linalg/index.html deleted file mode 100644 index 9d8de9a0e..000000000 --- a/owl-base/Owl_computation_operator_sig/module-type-Sig/Symbol/Shape/Type/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_operator_sig.Sig.Symbol.Shape.Type.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_operator_sig/module-type-Sig/Symbol/Shape/Type/Device/A/Mat/index.html b/owl-base/Owl_computation_operator_sig/module-type-Sig/Symbol/Shape/Type/Device/A/Mat/index.html deleted file mode 100644 index 5331fb678..000000000 --- a/owl-base/Owl_computation_operator_sig/module-type-Sig/Symbol/Shape/Type/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_operator_sig.Sig.Symbol.Shape.Type.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_operator_sig/module-type-Sig/Symbol/Shape/Type/Device/A/Scalar/index.html b/owl-base/Owl_computation_operator_sig/module-type-Sig/Symbol/Shape/Type/Device/A/Scalar/index.html deleted file mode 100644 index f1a85bbde..000000000 --- a/owl-base/Owl_computation_operator_sig/module-type-Sig/Symbol/Shape/Type/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_operator_sig.Sig.Symbol.Shape.Type.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_computation_operator_sig/module-type-Sig/Symbol/Shape/Type/Device/A/index.html b/owl-base/Owl_computation_operator_sig/module-type-Sig/Symbol/Shape/Type/Device/A/index.html deleted file mode 100644 index b7756596c..000000000 --- a/owl-base/Owl_computation_operator_sig/module-type-Sig/Symbol/Shape/Type/Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_operator_sig.Sig.Symbol.Shape.Type.Device.A)

Module Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_operator_sig/module-type-Sig/Symbol/Shape/Type/Device/index.html b/owl-base/Owl_computation_operator_sig/module-type-Sig/Symbol/Shape/Type/Device/index.html deleted file mode 100644 index 7955b7870..000000000 --- a/owl-base/Owl_computation_operator_sig/module-type-Sig/Symbol/Shape/Type/Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_computation_operator_sig.Sig.Symbol.Shape.Type.Device)

Module Type.Device

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_operator_sig/module-type-Sig/Symbol/Shape/Type/index.html b/owl-base/Owl_computation_operator_sig/module-type-Sig/Symbol/Shape/Type/index.html deleted file mode 100644 index e4c6a6be0..000000000 --- a/owl-base/Owl_computation_operator_sig/module-type-Sig/Symbol/Shape/Type/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Type (owl-base.Owl_computation_operator_sig.Sig.Symbol.Shape.Type)

Module Shape.Type

Type definition
type state =
  1. | Valid
  2. | Invalid
    (*

    TODO

    *)

TODO

and block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}

The block type keeps a reference to a block of memory and to the nodes sharing that block.
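
As an illustration, a block value can be inspected through its fields. The sketch below assumes an existing value blk of type block; block_summary is a hypothetical helper, not part of the interface.

(* Sketch only: [blk] is an existing value of the block type above. *)
let block_summary (blk : block) =
  Printf.sprintf "block %d (size %d) is shared by %d node(s)"
    blk.block_id blk.size (List.length blk.nodes)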

and attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}

TODO

and arr =
  1. | Arr of t
and elt =
  1. | Elt of t
and op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
    (*

    TODO

    *)
\ No newline at end of file diff --git a/owl-base/Owl_computation_operator_sig/module-type-Sig/Symbol/Shape/index.html b/owl-base/Owl_computation_operator_sig/module-type-Sig/Symbol/Shape/index.html deleted file mode 100644 index 9a0dd1b7c..000000000 --- a/owl-base/Owl_computation_operator_sig/module-type-Sig/Symbol/Shape/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Shape (owl-base.Owl_computation_operator_sig.Sig.Symbol.Shape)

Module Symbol.Shape

Core functions
val infer_shape : Type.op -> Type.attr Owl_graph.node array -> int array option array

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_operator_sig/module-type-Sig/Symbol/index.html b/owl-base/Owl_computation_operator_sig/module-type-Sig/Symbol/index.html deleted file mode 100644 index fd2281f2e..000000000 --- a/owl-base/Owl_computation_operator_sig/module-type-Sig/Symbol/index.html +++ /dev/null @@ -1,28 +0,0 @@ - -Symbol (owl-base.Owl_computation_operator_sig.Sig.Symbol)

Module Sig.Symbol

Core functions
val op_to_str : Shape.Type.op -> string

TODO

val is_random_variable : Shape.Type.op -> bool

TODO

val refnum : 'a Owl_graph.node -> int

TODO

val node_shape : Shape.Type.attr Owl_graph.node -> int array

TODO

val node_numel : Shape.Type.attr Owl_graph.node -> int

TODO

val is_shape_unknown : Shape.Type.attr Owl_graph.node -> bool

TODO

val infer_shape_graph : Shape.Type.attr Owl_graph.node array -> unit

TODO

val shape_to_str : int array option array -> string

TODO

val node_to_str : Shape.Type.attr Owl_graph.node -> string

TODO

val node_to_arr : Shape.Type.t -> Shape.Type.arr

TODO

val arr_to_node : Shape.Type.arr -> Shape.Type.t

TODO

val node_to_elt : Shape.Type.t -> Shape.Type.elt

TODO

val elt_to_node : Shape.Type.elt -> Shape.Type.t

TODO

val make_node : ?name:string -> ?value:Shape.Type.Device.value array -> ?shape:int array option array -> ?freeze:bool -> ?reuse:bool -> ?state:Shape.Type.state -> Shape.Type.op -> Shape.Type.attr Owl_graph.node

TODO

val make_then_connect : ?shape:int array option array -> Shape.Type.op -> Shape.Type.attr Owl_graph.node array -> Shape.Type.attr Owl_graph.node

TODO

val var_arr : ?shape:int array -> string -> Shape.Type.arr

TODO

val var_elt : string -> Shape.Type.elt

TODO

val const_arr : string -> Shape.Type.Device.A.arr -> Shape.Type.arr

TODO

val const_elt : string -> Shape.Type.Device.A.elt -> Shape.Type.elt

TODO

val new_block_id : unit -> int

new_block_id () returns an unused block id.

val make_empty_block : ?block_id:int -> int -> Shape.Type.block

make_empty_block s returns an empty block of memory of size s.

val make_value_block : Shape.Type.Device.value -> Shape.Type.attr Owl_graph.node -> unit

make_value_block value node creates a block of memory initialised with value and links the new block to node.

get_block node returns the memory block allocated to node. If no block is allocated, throws an exception.

val add_node_to_block : Shape.Type.attr Owl_graph.node -> Shape.Type.block -> unit

Links a node to a reusable block and initialises the node's memory inside the memory of that block.
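
Taken together, new_block_id, make_empty_block and add_node_to_block describe how a reusable block is created and shared. The sketch below assumes a module Symbol satisfying this signature and two existing nodes x and y; share_block is a hypothetical helper, not part of the interface.

(* Sketch only: [Symbol] is assumed to satisfy this signature; [x] and [y]
   are existing Shape.Type.attr Owl_graph.node values. *)
let share_block x y size =
  (* allocate one reusable block, tagged with a fresh id *)
  let blk = Symbol.make_empty_block ~block_id:(Symbol.new_block_id ()) size in
  (* attach both nodes so that their memory lives inside [blk] *)
  Symbol.add_node_to_block x blk;
  Symbol.add_node_to_block y blk;
  blk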

val get_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node option

Return the node that is currently using the memory of the block.

val set_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node -> unit

Update the node that is currently using the block of memory.

val get_block_id : Shape.Type.attr Owl_graph.node -> int

get_block_id node returns the id of the block assigned to node. If node has not been assigned yet, returns -1.

val set_value : Shape.Type.attr Owl_graph.node -> Shape.Type.Device.value array -> unit

TODO

val set_operator : Shape.Type.attr Owl_graph.node -> Shape.Type.op -> unit

TODO

TODO

val set_reuse : Shape.Type.attr Owl_graph.node -> bool -> unit

TODO

val get_reuse : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_shared : Shape.Type.attr Owl_graph.node -> bool

TODO

val get_shared_nodes : Shape.Type.attr Owl_graph.node -> Shape.Type.attr Owl_graph.node array

get_shared_nodes node returns the nodes sharing the same block of memory as node.

val is_var : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_const : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_arr : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_elt : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_assigned : Shape.Type.attr Owl_graph.node -> bool

is_assigned node checks if a block of memory has been assigned to node.

val check_assigned : Shape.Type.attr Owl_graph.node -> unit

check_assigned node throws an exception if node has not been assigned to a block.
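
The assignment queries above combine naturally when inspecting how a graph was allocated. The sketch below uses the same assumed Symbol module; report_node is a hypothetical helper.

(* Sketch only: [n] is a Shape.Type.attr Owl_graph.node. *)
let report_node n =
  if Symbol.is_assigned n then
    Printf.printf "assigned to block %d, shared with %d node(s)\n"
      (Symbol.get_block_id n)
      (Array.length (Symbol.get_shared_nodes n))
  else
    (* here get_block_id returns -1 and check_assigned would raise *)
    print_endline "no memory block assigned yet"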

val is_valid : Shape.Type.attr Owl_graph.node -> bool

TODO

val validate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate_graph : Shape.Type.attr Owl_graph.node -> unit

TODO

val is_freeze : Shape.Type.attr Owl_graph.node -> bool

TODO

val freeze : Shape.Type.attr Owl_graph.node -> unit

TODO

val freeze_descendants : Shape.Type.attr Owl_graph.node array -> unit

TODO

val freeze_ancestors : Shape.Type.attr Owl_graph.node array -> unit

TODO

TODO

TODO

val unsafe_assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_elt : Shape.Type.elt -> Shape.Type.Device.A.elt -> unit

TODO

val float_to_elt : float -> Shape.Type.elt

TODO

val elt_to_float : Shape.Type.elt -> float

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_operator_sig/module-type-Sig/index.html b/owl-base/Owl_computation_operator_sig/module-type-Sig/index.html deleted file mode 100644 index 3583bb837..000000000 --- a/owl-base/Owl_computation_operator_sig/module-type-Sig/index.html +++ /dev/null @@ -1,420 +0,0 @@ - -Sig (owl-base.Owl_computation_operator_sig.Sig)

Module type Owl_computation_operator_sig.Sig

Vectorised functions
val empty : int array -> Symbol.Shape.Type.arr

TODO

val zeros : int array -> Symbol.Shape.Type.arr

TODO

val ones : int array -> Symbol.Shape.Type.arr

TODO

val create : int array -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val sequential : ?a:Symbol.Shape.Type.elt -> ?step:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val uniform : ?a:Symbol.Shape.Type.elt -> ?b:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val gaussian : ?mu:Symbol.Shape.Type.elt -> ?sigma:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val bernoulli : ?p:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val init : int array -> (int -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr

TODO

val init_nd : int array -> (int array -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr

TODO

val shape : Symbol.Shape.Type.arr -> int array

TODO

val numel : Symbol.Shape.Type.arr -> int

TODO

TODO

val set : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.elt -> unit

TODO

val get_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val set_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit

TODO

TODO

val set_fancy : Owl_types.index list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit

TODO

val copy_ : out:'a -> 'b -> 'c

TODO

val reset : Symbol.Shape.Type.arr -> unit

TODO

val reshape : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val tile : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val repeat : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val pad : ?v:Symbol.Shape.Type.elt -> int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val expand : ?hi:bool -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr

TODO

val squeeze : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val concatenate : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr

TODO

val stack : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr

TODO

TODO

val split : ?axis:int -> 'a -> 'b -> 'c

TODO

val draw : ?axis:int -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr * 'a array

TODO

TODO

delay f x returns f x. It allows applying a function that is not tracked by the computation graph and delays its evaluation. The output must have the same shape as the input.

delay_array out_shape f x works in the same way as delay but is applied to an array of ndarrays. It needs the shape of the output as an argument.
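
As a usage sketch, delay lets an arbitrary ndarray-level function appear in the graph as a single opaque node. The example below assumes a module CG satisfying this Sig and writes A for its underlying Symbol.Shape.Type.Device.A ndarray module; softclip is a hypothetical helper, not part of the interface.

(* Sketch only: the clipping runs outside the graph, so the optimiser sees
   one Delay node whose output has the same shape as its input. *)
let softclip (x : CG.Symbol.Shape.Type.arr) =
  CG.delay (fun a -> A.clip_by_value ~amax:(A.float_to_elt 1.) a) x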

val lazy_print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(Symbol.Shape.Type.Device.A.elt -> string) -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

lazy_print x prints the output of x when it is evaluated. It is implemented as an identity node. For information about the optional parameters, refer to the print function of the Ndarray module.
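
Because lazy_print is an identity node, it can be spliced into the middle of a pipeline purely for debugging, without changing the result. A minimal sketch with the same assumed CG module; the surrounding operations are examples only.

(* Sketch only: print the intermediate value when the graph is evaluated. *)
let debugged_sum (x : CG.Symbol.Shape.Type.arr) =
  x
  |> CG.lazy_print ~header:true ~max_row:10
  |> CG.sum ~axis:0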

val print : ?max_row:'a -> ?max_col:'b -> ?header:'c -> ?fmt:'d -> 'e -> unit

TODO

val min : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val sum : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val sum_reduce : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val log_sum_exp : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

TODO

val elt_less_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val elt_greater_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val upsampling2d : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val upsampling2d_backward : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val row_num : Symbol.Shape.Type.arr -> int

TODO

val col_num : Symbol.Shape.Type.arr -> int

TODO

val rows : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val copy_row_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

val copy_col_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

TODO

val transpose : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val to_rows : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val to_cols : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val of_array : Symbol.Shape.Type.elt array -> int array -> Symbol.Shape.Type.arr

TODO

val of_arrays : Symbol.Shape.Type.elt array array -> Symbol.Shape.Type.arr

TODO

val to_arrays : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt array array

TODO

Scalar functions
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_computation_optimiser/.dummy b/owl-base/Owl_computation_optimiser/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Linalg/index.html b/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Linalg/index.html deleted file mode 100644 index 6cae3c041..000000000 --- a/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Linalg/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Linalg (owl-base.Owl_computation_optimiser.Make.Operator.Linalg)

Module Operator.Linalg

\ No newline at end of file diff --git a/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Mat/index.html b/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Mat/index.html deleted file mode 100644 index 59f8e418f..000000000 --- a/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_optimiser.Make.Operator.Mat)

Module Operator.Mat

val eye : int -> Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Scalar/index.html b/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Scalar/index.html deleted file mode 100644 index fea86f481..000000000 --- a/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Scalar/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -Scalar (owl-base.Owl_computation_optimiser.Make.Operator.Scalar)

Module Operator.Scalar

\ No newline at end of file diff --git a/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Symbol/Shape/Type/Device/A/Linalg/index.html b/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Symbol/Shape/Type/Device/A/Linalg/index.html deleted file mode 100644 index 4b5ce4356..000000000 --- a/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Symbol/Shape/Type/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_optimiser.Make.Operator.Symbol.Shape.Type.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Symbol/Shape/Type/Device/A/Mat/index.html b/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Symbol/Shape/Type/Device/A/Mat/index.html deleted file mode 100644 index cee4b2e95..000000000 --- a/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Symbol/Shape/Type/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_optimiser.Make.Operator.Symbol.Shape.Type.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Symbol/Shape/Type/Device/A/Scalar/index.html b/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Symbol/Shape/Type/Device/A/Scalar/index.html deleted file mode 100644 index ad3f81e91..000000000 --- a/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Symbol/Shape/Type/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_optimiser.Make.Operator.Symbol.Shape.Type.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Symbol/Shape/Type/Device/A/index.html b/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Symbol/Shape/Type/Device/A/index.html deleted file mode 100644 index 43e5c2919..000000000 --- a/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Symbol/Shape/Type/Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_optimiser.Make.Operator.Symbol.Shape.Type.Device.A)

Module Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Symbol/Shape/Type/Device/index.html b/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Symbol/Shape/Type/Device/index.html deleted file mode 100644 index 446541cb3..000000000 --- a/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Symbol/Shape/Type/Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_computation_optimiser.Make.Operator.Symbol.Shape.Type.Device)

Module Type.Device

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Symbol/Shape/Type/index.html b/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Symbol/Shape/Type/index.html deleted file mode 100644 index bff0d1b6d..000000000 --- a/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Symbol/Shape/Type/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Type (owl-base.Owl_computation_optimiser.Make.Operator.Symbol.Shape.Type)

Module Shape.Type

Type definition
type state =
  1. | Valid
  2. | Invalid
    (*

    TODO

    *)

TODO

and block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}

block type keeps a reference to a block of memory and to the nodes sharing that block.

and attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}

TODO

and arr =
  1. | Arr of t
and elt =
  1. | Elt of t
and op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
    (*

    TODO

    *)
\ No newline at end of file diff --git a/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Symbol/Shape/index.html b/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Symbol/Shape/index.html deleted file mode 100644 index 50091dd4f..000000000 --- a/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Symbol/Shape/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Shape (owl-base.Owl_computation_optimiser.Make.Operator.Symbol.Shape)

Module Symbol.Shape

Core functions
val infer_shape : Type.op -> Type.attr Owl_graph.node array -> int array option array

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Symbol/index.html b/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Symbol/index.html deleted file mode 100644 index 0c0572955..000000000 --- a/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/Symbol/index.html +++ /dev/null @@ -1,28 +0,0 @@ - -Symbol (owl-base.Owl_computation_optimiser.Make.Operator.Symbol)

Module Operator.Symbol

Core functions
val op_to_str : Shape.Type.op -> string

TODO

val is_random_variable : Shape.Type.op -> bool

TODO

val refnum : 'a Owl_graph.node -> int

TODO

val node_shape : Shape.Type.attr Owl_graph.node -> int array

TODO

val node_numel : Shape.Type.attr Owl_graph.node -> int

TODO

val is_shape_unknown : Shape.Type.attr Owl_graph.node -> bool

TODO

val infer_shape_graph : Shape.Type.attr Owl_graph.node array -> unit

TODO

val shape_to_str : int array option array -> string

TODO

val node_to_str : Shape.Type.attr Owl_graph.node -> string

TODO

val node_to_arr : Shape.Type.t -> Shape.Type.arr

TODO

val arr_to_node : Shape.Type.arr -> Shape.Type.t

TODO

val node_to_elt : Shape.Type.t -> Shape.Type.elt

TODO

val elt_to_node : Shape.Type.elt -> Shape.Type.t

TODO

val make_node : ?name:string -> ?value:Shape.Type.Device.value array -> ?shape:int array option array -> ?freeze:bool -> ?reuse:bool -> ?state:Shape.Type.state -> Shape.Type.op -> Shape.Type.attr Owl_graph.node

TODO

val make_then_connect : ?shape:int array option array -> Shape.Type.op -> Shape.Type.attr Owl_graph.node array -> Shape.Type.attr Owl_graph.node

TODO

val var_arr : ?shape:int array -> string -> Shape.Type.arr

TODO

val var_elt : string -> Shape.Type.elt

TODO

val const_arr : string -> Shape.Type.Device.A.arr -> Shape.Type.arr

TODO

val const_elt : string -> Shape.Type.Device.A.elt -> Shape.Type.elt

TODO

val new_block_id : unit -> int

new_block_id () returns an unused block id.

val make_empty_block : ?block_id:int -> int -> Shape.Type.block

make_empty_block s returns an empty block of memory of size s.
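
The two allocation helpers above are typically used together. Below is a minimal sketch, assuming a concrete instantiation of this Symbol signature is in scope under the name Symbol; the module name and the block size of 1000 elements are illustrative assumptions, not part of this signature.

(* Sketch: reserve a fresh block id, then create an empty memory block of
   1000 elements registered under that id. *)
let block =
  let id = Symbol.new_block_id () in
  Symbol.make_empty_block ~block_id:id 1000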

val make_value_block : Shape.Type.Device.value -> Shape.Type.attr Owl_graph.node -> unit

make_value_block value node creates a block of memory initialised with value and links the new block to node.

get_block node returns the memory block allocated to node. If no block is allocated, throws an exception.

val add_node_to_block : Shape.Type.attr Owl_graph.node -> Shape.Type.block -> unit

Links a node to a reusable block and initialises the node's memory within the memory of that block.

val get_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node option

Return the node that is currently using the memory of the block.

val set_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node -> unit

Update the node that is currently using the block of memory.

val get_block_id : Shape.Type.attr Owl_graph.node -> int

get_block_id node returns the id of the block assigned to node. If node has not been assigned yet, returns -1.
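
The functions from add_node_to_block to get_block_id describe how several nodes can share one block of memory. The sketch below ties them together; it assumes a concrete Symbol instantiation is in scope as Symbol, that x and y are existing Shape.Type.attr Owl_graph.node values, and that sizing the block by the element count of x is appropriate for both nodes.

(* Sketch: make x and y share one reusable block, hand the memory to x,
   and check that both nodes now report the same block id. *)
let share_block x y =
  let block = Symbol.make_empty_block (Symbol.node_numel x) in
  Symbol.add_node_to_block x block;
  Symbol.add_node_to_block y block;
  Symbol.set_active_node block x;
  (match Symbol.get_active_node block with
   | Some _ -> ()              (* the block is now in use *)
   | None   -> assert false);
  assert (Symbol.get_block_id x = Symbol.get_block_id y)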

val set_value : Shape.Type.attr Owl_graph.node -> Shape.Type.Device.value array -> unit

TODO

val set_operator : Shape.Type.attr Owl_graph.node -> Shape.Type.op -> unit

TODO

TODO

val set_reuse : Shape.Type.attr Owl_graph.node -> bool -> unit

TODO

val get_reuse : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_shared : Shape.Type.attr Owl_graph.node -> bool

TODO

val get_shared_nodes : Shape.Type.attr Owl_graph.node -> Shape.Type.attr Owl_graph.node array

get_shared_nodes node returns the nodes sharing the same block of memory as node.
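
A small sketch of how is_shared (below) and get_shared_nodes can be combined, again assuming a concrete Symbol instantiation is in scope as Symbol.

(* Sketch: report how many nodes use the same memory block as x. *)
let report_sharing x =
  if Symbol.is_shared x then begin
    let peers = Symbol.get_shared_nodes x in
    Printf.printf "%s is in a block shared by %d node(s)\n"
      (Symbol.node_to_str x) (Array.length peers)
  end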

val is_var : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_const : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_arr : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_elt : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_assigned : Shape.Type.attr Owl_graph.node -> bool

is_assigned node checks if a block of memory has been assigned to node.

val check_assigned : Shape.Type.attr Owl_graph.node -> unit

check_assigned node throws an exception if node has not been assigned to a block.
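
A possible guard built from these two checks, assuming a concrete Symbol instantiation is in scope as Symbol; allocate is a hypothetical allocation routine supplied by the caller, not part of this signature.

(* Sketch: allocate memory for a node only if it has none, then let
   check_assigned raise if the allocation still did not attach a block. *)
let ensure_assigned allocate node =
  if not (Symbol.is_assigned node) then allocate node;
  Symbol.check_assigned node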

val is_valid : Shape.Type.attr Owl_graph.node -> bool

TODO

val validate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate_graph : Shape.Type.attr Owl_graph.node -> unit

TODO

val is_freeze : Shape.Type.attr Owl_graph.node -> bool

TODO

val freeze : Shape.Type.attr Owl_graph.node -> unit

TODO

val freeze_descendants : Shape.Type.attr Owl_graph.node array -> unit

TODO

val freeze_ancestors : Shape.Type.attr Owl_graph.node array -> unit

TODO

TODO

TODO

val unsafe_assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_elt : Shape.Type.elt -> Shape.Type.Device.A.elt -> unit

TODO

val float_to_elt : float -> Shape.Type.elt

TODO

val elt_to_float : Shape.Type.elt -> float

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/index.html b/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/index.html deleted file mode 100644 index 4aa24105b..000000000 --- a/owl-base/Owl_computation_optimiser/Make/argument-1-Operator/index.html +++ /dev/null @@ -1,420 +0,0 @@ - -Operator (owl-base.Owl_computation_optimiser.Make.Operator)

Parameter Make.Operator

Vectorised functions
val empty : int array -> Symbol.Shape.Type.arr

TODO

val zeros : int array -> Symbol.Shape.Type.arr

TODO

val ones : int array -> Symbol.Shape.Type.arr

TODO

val create : int array -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val sequential : ?a:Symbol.Shape.Type.elt -> ?step:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val uniform : ?a:Symbol.Shape.Type.elt -> ?b:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val gaussian : ?mu:Symbol.Shape.Type.elt -> ?sigma:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val bernoulli : ?p:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val init : int array -> (int -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr

TODO

val init_nd : int array -> (int array -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr

TODO

val shape : Symbol.Shape.Type.arr -> int array

TODO

val numel : Symbol.Shape.Type.arr -> int

TODO

TODO

val set : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.elt -> unit

TODO

val get_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val set_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit

TODO

TODO

val set_fancy : Owl_types.index list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit

TODO

val copy_ : out:'a -> 'b -> 'c

TODO

val reset : Symbol.Shape.Type.arr -> unit

TODO

val reshape : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val tile : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val repeat : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val pad : ?v:Symbol.Shape.Type.elt -> int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val expand : ?hi:bool -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr

TODO

val squeeze : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val concatenate : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr

TODO

val stack : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr

TODO

TODO

val split : ?axis:int -> 'a -> 'b -> 'c

TODO

val draw : ?axis:int -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr * 'a array

TODO

TODO

delay f x returns f x. It makes it possible to apply a function f that is not tracked by the computation graph and to delay its evaluation. The output must have the same shape as the input.

delay_array out_shape f x works in the same way as delay but is applied to an array of ndarrays. The shape of the output must be given as the out_shape argument.
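
A sketch of delay, assuming a concrete instantiation of this Operator signature is in scope as Op; blur and the 0.5 factor are arbitrary stand-ins for any shape-preserving function on the underlying ndarray type.

module A = Op.Symbol.Shape.Type.Device.A

(* An untracked, shape-preserving function on raw ndarrays. *)
let blur x = A.mul_scalar x (A.float_to_elt 0.5)

(* Wrap it into the graph; its evaluation is delayed until the graph runs. *)
let blurred_node x = Op.delay blur x

(* delay_array works the same way on an array of nodes, except that the
   output shape must be supplied explicitly as the first argument. *)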

val lazy_print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(Symbol.Shape.Type.Device.A.elt -> string) -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

lazy_print x prints the output of x when it is evaluated. It is implemented as an identity node. For information about the optional parameters, refer to the print function of the Ndarray module.
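
For instance, still assuming a concrete Operator instantiation named Op, a print node can be spliced into an expression without changing its value.

(* Sketch: y is printed (at most 10 rows, with a header) when the graph
   containing traced_y is evaluated; traced_y itself equals y. *)
let traced_y y = Op.lazy_print ~max_row:10 ~header:true y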

val print : ?max_row:'a -> ?max_col:'b -> ?header:'c -> ?fmt:'d -> 'e -> unit

TODO

val min : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val sum : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val sum_reduce : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val log_sum_exp : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

TODO

val elt_less_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val elt_greater_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val upsampling2d : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val upsampling2d_backward : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val row_num : Symbol.Shape.Type.arr -> int

TODO

val col_num : Symbol.Shape.Type.arr -> int

TODO

val rows : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val copy_row_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

val copy_col_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

TODO

val transpose : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val to_rows : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val to_cols : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val of_array : Symbol.Shape.Type.elt array -> int array -> Symbol.Shape.Type.arr

TODO

val of_arrays : Symbol.Shape.Type.elt array array -> Symbol.Shape.Type.arr

TODO

val to_arrays : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt array array

TODO

Scalar functions
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_computation_optimiser/Make/index.html b/owl-base/Owl_computation_optimiser/Make/index.html deleted file mode 100644 index 5d7d3fae6..000000000 --- a/owl-base/Owl_computation_optimiser/Make/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Make (owl-base.Owl_computation_optimiser.Make)

Module Owl_computation_optimiser.Make

Parameters

Signature

module Operator = Operator
val _optimise_term : Operator.Symbol.Shape.Type.attr Owl_graph.node -> unit
val pattern_011 : Operator.Symbol.Shape.Type.op -> float -> float -> float
val pattern_013 : Operator.Symbol.Shape.Type.op -> float -> float
val pattern_021 : 'a -> 'b
val estimate_complexity : 'a Owl_graph.node array -> int * int
val optimise_nodes : Operator.Symbol.Shape.Type.attr Owl_graph.node array -> unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_optimiser_sig/.dummy b/owl-base/Owl_computation_optimiser_sig/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Linalg/index.html b/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Linalg/index.html deleted file mode 100644 index 1e6999795..000000000 --- a/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Linalg/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Linalg (owl-base.Owl_computation_optimiser_sig.Sig.Operator.Linalg)

Module Operator.Linalg

\ No newline at end of file diff --git a/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Mat/index.html b/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Mat/index.html deleted file mode 100644 index 079e43721..000000000 --- a/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_optimiser_sig.Sig.Operator.Mat)

Module Operator.Mat

val eye : int -> Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Scalar/index.html b/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Scalar/index.html deleted file mode 100644 index ee19378b6..000000000 --- a/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Scalar/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -Scalar (owl-base.Owl_computation_optimiser_sig.Sig.Operator.Scalar)

Module Operator.Scalar

\ No newline at end of file diff --git a/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html b/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html deleted file mode 100644 index 85185fe9e..000000000 --- a/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_optimiser_sig.Sig.Operator.Symbol.Shape.Type.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Symbol/Shape/Type/Device/A/Mat/index.html b/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Symbol/Shape/Type/Device/A/Mat/index.html deleted file mode 100644 index 3157250d6..000000000 --- a/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Symbol/Shape/Type/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_optimiser_sig.Sig.Operator.Symbol.Shape.Type.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html b/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html deleted file mode 100644 index 2b668ca1d..000000000 --- a/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_optimiser_sig.Sig.Operator.Symbol.Shape.Type.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Symbol/Shape/Type/Device/A/index.html b/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Symbol/Shape/Type/Device/A/index.html deleted file mode 100644 index 0db2a9321..000000000 --- a/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Symbol/Shape/Type/Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_optimiser_sig.Sig.Operator.Symbol.Shape.Type.Device.A)

Module Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Symbol/Shape/Type/Device/index.html b/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Symbol/Shape/Type/Device/index.html deleted file mode 100644 index 0b0ae3dea..000000000 --- a/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Symbol/Shape/Type/Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_computation_optimiser_sig.Sig.Operator.Symbol.Shape.Type.Device)

Module Type.Device

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Symbol/Shape/Type/index.html b/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Symbol/Shape/Type/index.html deleted file mode 100644 index 3ec181f2e..000000000 --- a/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Symbol/Shape/Type/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Type (owl-base.Owl_computation_optimiser_sig.Sig.Operator.Symbol.Shape.Type)

Module Shape.Type

Type definition
type state =
  1. | Valid
  2. | Invalid
    (*

    TODO

    *)

TODO

and block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}

block type keeps a reference to a block of memory and to the nodes sharing that block.

and attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}

TODO

and arr =
  1. | Arr of t
and elt =
  1. | Elt of t
and op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
    (*

    TODO

    *)
\ No newline at end of file diff --git a/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Symbol/Shape/index.html b/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Symbol/Shape/index.html deleted file mode 100644 index 6a9c4df6f..000000000 --- a/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Symbol/Shape/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Shape (owl-base.Owl_computation_optimiser_sig.Sig.Operator.Symbol.Shape)

Module Symbol.Shape

Core functions
val infer_shape : - Type.op -> - Type.attr Owl_graph.node array -> - int array option array

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Symbol/index.html b/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Symbol/index.html deleted file mode 100644 index 43fdb8703..000000000 --- a/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/Symbol/index.html +++ /dev/null @@ -1,28 +0,0 @@ - -Symbol (owl-base.Owl_computation_optimiser_sig.Sig.Operator.Symbol)

Module Operator.Symbol

Core functions
val op_to_str : Shape.Type.op -> string

TODO

val is_random_variable : Shape.Type.op -> bool

TODO

val refnum : 'a Owl_graph.node -> int

TODO

val node_shape : Shape.Type.attr Owl_graph.node -> int array

TODO

val node_numel : Shape.Type.attr Owl_graph.node -> int

TODO

val is_shape_unknown : Shape.Type.attr Owl_graph.node -> bool

TODO

val infer_shape_graph : Shape.Type.attr Owl_graph.node array -> unit

TODO

val shape_to_str : int array option array -> string

TODO

val node_to_str : Shape.Type.attr Owl_graph.node -> string

TODO

val node_to_arr : Shape.Type.t -> Shape.Type.arr

TODO

val arr_to_node : Shape.Type.arr -> Shape.Type.t

TODO

val node_to_elt : Shape.Type.t -> Shape.Type.elt

TODO

val elt_to_node : Shape.Type.elt -> Shape.Type.t

TODO

val make_node : - ?name:string -> - ?value:Shape.Type.Device.value array -> - ?shape:int array option array -> - ?freeze:bool -> - ?reuse:bool -> - ?state:Shape.Type.state -> - Shape.Type.op -> - Shape.Type.attr Owl_graph.node

TODO

val make_then_connect : - ?shape:int array option array -> - Shape.Type.op -> - Shape.Type.attr Owl_graph.node array -> - Shape.Type.attr Owl_graph.node

TODO

val var_arr : ?shape:int array -> string -> Shape.Type.arr

TODO

val var_elt : string -> Shape.Type.elt

TODO

val const_arr : string -> Shape.Type.Device.A.arr -> Shape.Type.arr

TODO

val const_elt : string -> Shape.Type.Device.A.elt -> Shape.Type.elt

TODO

val new_block_id : unit -> int

new_block_id () returns an unused block id.

val make_empty_block : ?block_id:int -> int -> Shape.Type.block

make_empty_block s returns an empty block of memory of size s.

val make_value_block : - Shape.Type.Device.value -> - Shape.Type.attr Owl_graph.node -> - unit

make_value_block value node creates a block of memory initialised with value and links the new block to node.

get_block node returns the memory block allocated to node. If no block is allocated, throws an exception.

val add_node_to_block : - Shape.Type.attr Owl_graph.node -> - Shape.Type.block -> - unit

Links a node to a reusable block and initialises the node's memory within the memory of the block.

val get_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node option

Returns the node that is currently using the memory of the block.

val set_active_node : - Shape.Type.block -> - Shape.Type.attr Owl_graph.node -> - unit

Updates which node is currently using the memory of the block.

val get_block_id : Shape.Type.attr Owl_graph.node -> int

get_block_id node returns the id of the block assigned to node. If node has not been assigned yet, returns -1.
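
The following is a minimal sketch of how these block-management functions fit together. It assumes a module Symbol implementing this signature and two nodes n1 and n2 obtained elsewhere; all of these names are illustrative, not part of the signature itself.

let share_block n1 n2 =
  (* allocate a fresh block large enough to hold n1, then attach both nodes *)
  let id  = Symbol.new_block_id () in
  let blk = Symbol.make_empty_block ~block_id:id (Symbol.node_numel n1) in
  Symbol.add_node_to_block n1 blk;   (* n1 is now backed by blk's memory *)
  Symbol.add_node_to_block n2 blk;   (* n2 shares the same block *)
  Symbol.set_active_node blk n2;     (* record n2 as the current occupant *)
  (* get_block_id n1 and get_block_id n2 should now both return id *)
  Symbol.get_active_node blk         (* expected to be Some n2 *)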

val set_value : - Shape.Type.attr Owl_graph.node -> - Shape.Type.Device.value array -> - unit

TODO

val set_operator : Shape.Type.attr Owl_graph.node -> Shape.Type.op -> unit

TODO

TODO

val set_reuse : Shape.Type.attr Owl_graph.node -> bool -> unit

TODO

val get_reuse : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_shared : Shape.Type.attr Owl_graph.node -> bool

TODO

val get_shared_nodes : - Shape.Type.attr Owl_graph.node -> - Shape.Type.attr Owl_graph.node array

get_shared_nodes node returns the nodes sharing the same block of memory as node.
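
As a small hedged example (Symbol again denotes a module implementing this signature, and n an existing node obtained elsewhere):

let report_sharing n =
  let peers = Symbol.get_shared_nodes n in
  Printf.printf "%d node(s) are attached to the same block\n" (Array.length peers)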

val is_var : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_const : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_arr : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_elt : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_assigned : Shape.Type.attr Owl_graph.node -> bool

is_assigned node checks if a block of memory has been assigned to node.

val check_assigned : Shape.Type.attr Owl_graph.node -> unit

check_assigned node throws an exception if node has not been assigned to a block.
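
A short sketch contrasting the two checks, under the same assumptions as above (Symbol implements this signature, n is an existing node):

let block_id_or_warn n =
  if Symbol.is_assigned n then Some (Symbol.get_block_id n)
  else begin
    (* Symbol.check_assigned n would raise here, since no block is assigned *)
    print_endline "node has no memory block yet";
    None
  end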

val is_valid : Shape.Type.attr Owl_graph.node -> bool

TODO

val validate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate_graph : Shape.Type.attr Owl_graph.node -> unit

TODO

val is_freeze : Shape.Type.attr Owl_graph.node -> bool

TODO

val freeze : Shape.Type.attr Owl_graph.node -> unit

TODO

val freeze_descendants : Shape.Type.attr Owl_graph.node array -> unit

TODO

val freeze_ancestors : Shape.Type.attr Owl_graph.node array -> unit

TODO

TODO

TODO

val unsafe_assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_elt : Shape.Type.elt -> Shape.Type.Device.A.elt -> unit

TODO

val float_to_elt : float -> Shape.Type.elt

TODO

val elt_to_float : Shape.Type.elt -> float

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/index.html b/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/index.html deleted file mode 100644 index afc278f61..000000000 --- a/owl-base/Owl_computation_optimiser_sig/module-type-Sig/Operator/index.html +++ /dev/null @@ -1,420 +0,0 @@ - -Operator (owl-base.Owl_computation_optimiser_sig.Sig.Operator)

Module Sig.Operator

Vectorised functions
val empty : int array -> Symbol.Shape.Type.arr

TODO

val zeros : int array -> Symbol.Shape.Type.arr

TODO

val ones : int array -> Symbol.Shape.Type.arr

TODO

val create : int array -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val sequential : - ?a:Symbol.Shape.Type.elt -> - ?step:Symbol.Shape.Type.elt -> - int array -> - Symbol.Shape.Type.arr

TODO

val uniform : - ?a:Symbol.Shape.Type.elt -> - ?b:Symbol.Shape.Type.elt -> - int array -> - Symbol.Shape.Type.arr

TODO

val gaussian : - ?mu:Symbol.Shape.Type.elt -> - ?sigma:Symbol.Shape.Type.elt -> - int array -> - Symbol.Shape.Type.arr

TODO

val bernoulli : ?p:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val init : int array -> (int -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr

TODO

val init_nd : - int array -> - (int array -> Symbol.Shape.Type.elt) -> - Symbol.Shape.Type.arr

TODO

val shape : Symbol.Shape.Type.arr -> int array

TODO

val numel : Symbol.Shape.Type.arr -> int

TODO

TODO

val set : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.elt -> unit

TODO

val get_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val set_slice : - int list list -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - unit

TODO

TODO

val set_fancy : - Owl_types.index list -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - unit

TODO

val copy_ : out:'a -> 'b -> 'c

TODO

val reset : Symbol.Shape.Type.arr -> unit

TODO

val reshape : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val tile : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val repeat : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val pad : - ?v:Symbol.Shape.Type.elt -> - int list list -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val expand : ?hi:bool -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr

TODO

val squeeze : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val concatenate : - ?axis:int -> - Symbol.Shape.Type.arr array -> - Symbol.Shape.Type.arr

TODO

val stack : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr

TODO

TODO

val split : ?axis:int -> 'a -> 'b -> 'c

TODO

val draw : - ?axis:int -> - Symbol.Shape.Type.arr -> - int -> - Symbol.Shape.Type.arr * 'a array

TODO

TODO

delay f x returns f x. It allows a function that is not tracked by the computation graph to be used, delaying its evaluation. The output must have the same shape as the input.

delay_array out_shape f x works in the same way as delay but applies to an array of ndarrays. The shape of the output must be supplied as an argument.

val lazy_print : - ?max_row:int -> - ?max_col:int -> - ?header:bool -> - ?fmt:(Symbol.Shape.Type.Device.A.elt -> string) -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

lazy_print x prints the output of x when it is evaluated. It is implemented as an identity node. For information about the optional parameters, refer to the print function of the Ndarray module.
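
A combined sketch of delay, delay_array and lazy_print. It assumes a module Op implementing this Operator signature and A = Op.Symbol.Shape.Type.Device.A as the underlying ndarray module; the argument order of delay and delay_array is inferred from the descriptions above and should be checked against the concrete signature.

let lazy_demo (x : Op.Symbol.Shape.Type.arr) =
  (* apply an untracked A.arr -> A.arr function; the output keeps x's shape *)
  let y = Op.delay A.sqrt x in
  (* same idea over an array of lazy ndarrays; the output shape is supplied *)
  let z = Op.delay_array [| 28; 28 |] (fun a -> A.add a.(0) a.(1)) [| x; y |] in
  (* insert an identity node that prints z when the graph is evaluated *)
  Op.lazy_print ~header:true z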

val print : ?max_row:'a -> ?max_col:'b -> ?header:'c -> ?fmt:'d -> 'e -> unit

TODO

val min : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val max : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val sum : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val sum_reduce : - ?axis:int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val log_sum_exp : - ?axis:int -> - ?keep_dims:bool -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

TODO

val elt_less_equal_scalar : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.elt -> - Symbol.Shape.Type.arr

TODO

val elt_greater_equal_scalar : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.elt -> - Symbol.Shape.Type.arr

TODO

val conv1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val conv2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val conv3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val transpose_conv1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val transpose_conv2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val transpose_conv3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr

TODO

val dilated_conv1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val dilated_conv2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val dilated_conv3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val max_pool1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val max_pool2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val max_pool3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val avg_pool1d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val avg_pool2d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val avg_pool3d : - ?padding:Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr

TODO

val upsampling2d : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv1d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val conv1d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val conv2d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val conv2d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val conv3d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val conv3d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_input : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_kernel : - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val max_pool1d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val max_pool2d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val max_pool3d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val avg_pool1d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val avg_pool2d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val avg_pool3d_backward : - Owl_types.padding -> - Symbol.Shape.Type.arr -> - int array -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val upsampling2d_backward : - Symbol.Shape.Type.arr -> - int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val row_num : Symbol.Shape.Type.arr -> int

TODO

val col_num : Symbol.Shape.Type.arr -> int

TODO

val rows : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val copy_row_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

val copy_col_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

TODO

val transpose : - ?axis:int array -> - Symbol.Shape.Type.arr -> - Symbol.Shape.Type.arr

TODO

val to_rows : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val to_cols : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val of_array : - Symbol.Shape.Type.elt array -> - int array -> - Symbol.Shape.Type.arr

TODO

val of_arrays : Symbol.Shape.Type.elt array array -> Symbol.Shape.Type.arr

TODO

val to_arrays : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt array array

TODO

Scalar functions
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_computation_optimiser_sig/module-type-Sig/index.html b/owl-base/Owl_computation_optimiser_sig/module-type-Sig/index.html deleted file mode 100644 index 1610b3f1a..000000000 --- a/owl-base/Owl_computation_optimiser_sig/module-type-Sig/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Sig (owl-base.Owl_computation_optimiser_sig.Sig)

Module type Owl_computation_optimiser_sig.Sig

Core functions
val estimate_complexity : 'a Owl_graph.node array -> int * int

TODO

val optimise_nodes : - Operator.Symbol.Shape.Type.attr Owl_graph.node array -> - unit

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_shape/.dummy b/owl-base/Owl_computation_shape/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_computation_shape/Make/argument-1-Type/Device/A/Linalg/index.html b/owl-base/Owl_computation_shape/Make/argument-1-Type/Device/A/Linalg/index.html deleted file mode 100644 index 219e54ec8..000000000 --- a/owl-base/Owl_computation_shape/Make/argument-1-Type/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_shape.Make.Type.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : - ?solver:[ `default | `bilinear | `direct ] -> - arr -> - arr -> - arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_shape/Make/argument-1-Type/Device/A/Mat/index.html b/owl-base/Owl_computation_shape/Make/argument-1-Type/Device/A/Mat/index.html deleted file mode 100644 index 5f3f40ada..000000000 --- a/owl-base/Owl_computation_shape/Make/argument-1-Type/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_shape.Make.Type.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_shape/Make/argument-1-Type/Device/A/Scalar/index.html b/owl-base/Owl_computation_shape/Make/argument-1-Type/Device/A/Scalar/index.html deleted file mode 100644 index 0c8a4b243..000000000 --- a/owl-base/Owl_computation_shape/Make/argument-1-Type/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_shape.Make.Type.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_computation_shape/Make/argument-1-Type/Device/A/index.html b/owl-base/Owl_computation_shape/Make/argument-1-Type/Device/A/index.html deleted file mode 100644 index f12057101..000000000 --- a/owl-base/Owl_computation_shape/Make/argument-1-Type/Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_shape.Make.Type.Device.A)

Module Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : - ?max_row:int -> - ?max_col:int -> - ?header:bool -> - ?fmt:(elt -> string) -> - arr -> - unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val dilated_conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val dilated_conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val dilated_conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val transpose_conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val transpose_conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val transpose_conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val max_pool1d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val max_pool2d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val max_pool3d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool1d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool2d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool3d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv1d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv2d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv2d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv3d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv3d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val max_pool2d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val max_pool3d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool1d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool2d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool3d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : - ?transa:bool -> - ?transb:bool -> - ?alpha:elt -> - ?beta:elt -> - c:arr -> - arr -> - arr -> - unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val conv2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val conv3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val dilated_conv1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - unit
val dilated_conv2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - unit
val dilated_conv3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - unit
val transpose_conv1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val transpose_conv2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val transpose_conv3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val max_pool1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val max_pool2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val max_pool3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val avg_pool1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val avg_pool2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val avg_pool3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv1d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv2d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv2d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv3d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv3d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val transpose_conv1d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv1d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv2d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv2d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv3d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv3d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val max_pool1d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val max_pool2d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val max_pool3d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val avg_pool1d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val avg_pool2d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val avg_pool3d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_shape/Make/argument-1-Type/Device/index.html b/owl-base/Owl_computation_shape/Make/argument-1-Type/Device/index.html deleted file mode 100644 index b0698bb1f..000000000 --- a/owl-base/Owl_computation_shape/Make/argument-1-Type/Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_computation_shape.Make.Type.Device)

Module Type.Device

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_shape/Make/argument-1-Type/index.html b/owl-base/Owl_computation_shape/Make/argument-1-Type/index.html deleted file mode 100644 index 79f78cb37..000000000 --- a/owl-base/Owl_computation_shape/Make/argument-1-Type/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Type (owl-base.Owl_computation_shape.Make.Type)

Parameter Make.Type

Type definition
type state =
  1. | Valid
  2. | Invalid
    (*

    TODO

    *)

TODO

and block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}

The block type keeps a reference to a block of memory and to the nodes sharing that block.
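
For illustration, a record of this type can be inspected field by field (a sketch only, assuming the definitions above are in scope):

let describe_block (b : block) =
  Printf.printf "block %d: size = %d, %d attached node(s), active = %b\n"
    b.block_id b.size (List.length b.nodes) (Option.is_some b.active)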

and attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}

TODO

and arr =
  1. | Arr of t
and elt =
  1. | Elt of t
and op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option - * int option - * bool option - * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
    (*

    TODO

    *)
\ No newline at end of file diff --git a/owl-base/Owl_computation_shape/Make/index.html b/owl-base/Owl_computation_shape/Make/index.html deleted file mode 100644 index 423ec380c..000000000 --- a/owl-base/Owl_computation_shape/Make/index.html +++ /dev/null @@ -1,106 +0,0 @@ - -Make (owl-base.Owl_computation_shape.Make)

Module Owl_computation_shape.Make

Parameters

Signature

module Type = Type
val _infer_shape_00 : 'a -> 'b array option array
val _infer_shape_01 : 'a array option array array -> 'a array option array
val _infer_shape_02 : 'a array option array array -> 'a array option array
val _infer_shape_03 : int array option array array -> int array option array
val _infer_shape_04 : - int array option array array -> - int -> - int array option array
val _infer_shape_05 : - int array option array array -> - int array -> - int array option array
val _infer_shape_06 : - int array option array array -> - int array -> - int array option array
val _infer_shape_07 : - int array option array array -> - int -> - int array option array
val _infer_shape_08 : - int array option array array -> - int -> - int array -> - int array option array
val _infer_shape_09 : - 'a array option array array -> - int -> - 'b -> - 'a array option array
val _infer_shape_10 : - int array option array array -> - int array -> - int array option array
val _infer_shape_11 : - int array option array array -> - Owl_types.padding -> - int array -> - int array option array
val _infer_shape_12 : - int array option array array -> - Owl_types.padding -> - int array -> - int array option array
val _infer_shape_13 : - int array option array array -> - Owl_types.padding -> - int array -> - int array option array
val _infer_shape_14 : - int array option array array -> - Owl_types.padding -> - int array -> - int array option array
val _infer_shape_15 : - int array option array array -> - Owl_types.padding -> - int array -> - int array -> - int array option array
val _infer_shape_16 : - int array option array array -> - Owl_types.padding -> - int array -> - int array -> - int array option array
val _infer_shape_17 : - int array option array array -> - Owl_types.padding -> - int array -> - int array -> - int array option array
val _infer_shape_18 : - 'a array option array array -> - int array -> - 'a array option array
val _infer_shape_19 : 'a array option array array -> 'a array option array
val _infer_shape_20 : - int array option array array -> - int list list -> - int array option array
val _infer_shape_21 : - int array option array array -> - Owl_types.padding -> - int array -> - int array -> - int array option array
val _infer_shape_22 : - 'a array option array array -> - 'b -> - 'a array option array
val _infer_shape_23 : int array option array array -> int array option array
val _infer_shape_24 : - int array option array array -> - Owl_types.padding -> - int array -> - int array option array
val _infer_shape_25 : - int array option array array -> - Owl_types.padding -> - int array -> - int array option array
val _infer_shape_26 : - int array option array array -> - Owl_types.padding -> - int array -> - int array -> - int array option array
val _infer_shape_27 : - int array option array array -> - Owl_types.padding -> - int array -> - int array -> - int array option array
val _infer_shape_28 : - int array option array array -> - Owl_types.padding -> - int array -> - int array -> - int array option array
val _infer_shape_29 : - int array option array array -> - int array -> - int array option array
val _infer_shape_30 : - int array option array array -> - int list list -> - int array option array
val _infer_shape_31 : - bool -> - int array option array array -> - int -> - int array option array
val infer_shape : - Type.op -> - Type.attr Owl_graph.node array -> - int array option array
\ No newline at end of file diff --git a/owl-base/Owl_computation_shape_sig/.dummy b/owl-base/Owl_computation_shape_sig/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_computation_shape_sig/module-type-Sig/Type/Device/A/Linalg/index.html b/owl-base/Owl_computation_shape_sig/module-type-Sig/Type/Device/A/Linalg/index.html deleted file mode 100644 index 6e69356e3..000000000 --- a/owl-base/Owl_computation_shape_sig/module-type-Sig/Type/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_shape_sig.Sig.Type.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : - ?solver:[ `default | `bilinear | `direct ] -> - arr -> - arr -> - arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_shape_sig/module-type-Sig/Type/Device/A/Mat/index.html b/owl-base/Owl_computation_shape_sig/module-type-Sig/Type/Device/A/Mat/index.html deleted file mode 100644 index d25a6a07e..000000000 --- a/owl-base/Owl_computation_shape_sig/module-type-Sig/Type/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_shape_sig.Sig.Type.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_shape_sig/module-type-Sig/Type/Device/A/Scalar/index.html b/owl-base/Owl_computation_shape_sig/module-type-Sig/Type/Device/A/Scalar/index.html deleted file mode 100644 index 4f535785e..000000000 --- a/owl-base/Owl_computation_shape_sig/module-type-Sig/Type/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_shape_sig.Sig.Type.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_computation_shape_sig/module-type-Sig/Type/Device/A/index.html b/owl-base/Owl_computation_shape_sig/module-type-Sig/Type/Device/A/index.html deleted file mode 100644 index 0fa55a3ec..000000000 --- a/owl-base/Owl_computation_shape_sig/module-type-Sig/Type/Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_shape_sig.Sig.Type.Device.A)

Module Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : - ?max_row:int -> - ?max_col:int -> - ?header:bool -> - ?fmt:(elt -> string) -> - arr -> - unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
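The A signature above is abstract: a concrete ndarray module is plugged in when the computation-graph functors are instantiated. As a minimal usage sketch only (M is a placeholder for any module satisfying Owl_types_ndarray_algodiff.Sig; nothing in this diff fixes that choice), the creation and element-wise functions compose as follows:

(* Sketch: M is any module matching the A signature listed above. *)
module A_sketch (M : Owl_types_ndarray_algodiff.Sig) = struct
  let demo () =
    let x = M.gaussian [| 3; 3 |] in              (* 3x3 array, default mu and sigma *)
    let y = M.(add (sqr x) (ones [| 3; 3 |])) in  (* y = x * x + 1, element-wise *)
    M.elt_to_float (M.sum' y)                     (* reduce to an elt, then to float *)
end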
\ No newline at end of file diff --git a/owl-base/Owl_computation_shape_sig/module-type-Sig/Type/Device/index.html b/owl-base/Owl_computation_shape_sig/module-type-Sig/Type/Device/index.html deleted file mode 100644 index 1f35a3da3..000000000 --- a/owl-base/Owl_computation_shape_sig/module-type-Sig/Type/Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_computation_shape_sig.Sig.Type.Device)

Module Type.Device

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO

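Every entry in this Device signature is documented only as TODO; judging by the names, arr_to_value/value_to_arr and elt_to_value/value_to_elt box and unbox device values. A minimal round-trip sketch, written against an inline copy of the relevant part of the signature (D is hypothetical, not a module named in this diff):

(* Sketch: D stands for any module exposing the Device interface above. *)
module Round_trip
    (D : sig
       type value
       module A : sig type arr end
       val arr_to_value : A.arr -> value
       val value_to_arr : value -> A.arr
       val is_arr : value -> bool
     end) =
struct
  (* Boxing an ndarray into a device value and unboxing it should be the identity. *)
  let check (x : D.A.arr) : D.A.arr =
    let v = D.arr_to_value x in
    assert (D.is_arr v);
    D.value_to_arr v
end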
\ No newline at end of file diff --git a/owl-base/Owl_computation_shape_sig/module-type-Sig/Type/index.html b/owl-base/Owl_computation_shape_sig/module-type-Sig/Type/index.html deleted file mode 100644 index da3288e63..000000000 --- a/owl-base/Owl_computation_shape_sig/module-type-Sig/Type/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Type (owl-base.Owl_computation_shape_sig.Sig.Type)

Module Sig.Type

Type definition
type state =
  1. | Valid
  2. | Invalid
    (*

    TODO

    *)

TODO

and block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}

The block type keeps a reference to a block of memory and to the nodes sharing that block.

and attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}

TODO

and arr =
  1. | Arr of t
and elt =
  1. | Elt of t
and op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
    (*

    TODO

    *)
\ No newline at end of file diff --git a/owl-base/Owl_computation_shape_sig/module-type-Sig/index.html b/owl-base/Owl_computation_shape_sig/module-type-Sig/index.html deleted file mode 100644 index 1db92ef00..000000000 --- a/owl-base/Owl_computation_shape_sig/module-type-Sig/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Sig (owl-base.Owl_computation_shape_sig.Sig)

Module type Owl_computation_shape_sig.Sig

Core functions
val infer_shape : Type.op -> Type.attr Owl_graph.node array -> int array option array

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_symbol/.dummy b/owl-base/Owl_computation_symbol/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_computation_symbol/Make/argument-1-Shape/Type/Device/A/Linalg/index.html b/owl-base/Owl_computation_symbol/Make/argument-1-Shape/Type/Device/A/Linalg/index.html deleted file mode 100644 index aee205e81..000000000 --- a/owl-base/Owl_computation_symbol/Make/argument-1-Shape/Type/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_symbol.Make.Shape.Type.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
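As an illustration of how the Linalg operations above are typically combined (sketch only; M is a stand-in for any module providing this signature, and the variant names are assumed to carry their usual meaning, with `u marking an upper-triangular coefficient matrix):

(* Sketch: M is any module matching the Linalg signature listed above. *)
module Linalg_sketch
    (M : sig
       type arr
       val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
       val qr : arr -> arr * arr
     end) =
struct
  (* Solve a x = b with the generic driver. *)
  let solve a b = M.linsolve a b

  (* linsolve can also be told the matrix is already upper triangular. *)
  let solve_upper r b = M.linsolve ~typ:`u r b

  (* qr returns the orthogonal and triangular factors of a. *)
  let factors a = M.qr a
end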
\ No newline at end of file diff --git a/owl-base/Owl_computation_symbol/Make/argument-1-Shape/Type/Device/A/Mat/index.html b/owl-base/Owl_computation_symbol/Make/argument-1-Shape/Type/Device/A/Mat/index.html deleted file mode 100644 index 65d968ca3..000000000 --- a/owl-base/Owl_computation_symbol/Make/argument-1-Shape/Type/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_symbol.Make.Shape.Type.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_symbol/Make/argument-1-Shape/Type/Device/A/Scalar/index.html b/owl-base/Owl_computation_symbol/Make/argument-1-Shape/Type/Device/A/Scalar/index.html deleted file mode 100644 index 7538461ca..000000000 --- a/owl-base/Owl_computation_symbol/Make/argument-1-Shape/Type/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_symbol.Make.Shape.Type.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
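These scalar operations act on the abstract elt type rather than on float, so in practice they are paired with the float_to_elt/elt_to_float conversions from the enclosing A module. A hedged sketch (A is a placeholder; only names listed in this diff are used):

(* Sketch: A is any module exposing elt conversions plus the Scalar submodule above. *)
module Scalar_sketch
    (A : sig
       type elt
       val float_to_elt : float -> elt
       val elt_to_float : elt -> float
       module Scalar : sig
         val add : elt -> elt -> elt
         val sigmoid : elt -> elt
       end
     end) =
struct
  (* Compute sigmoid x + 1 entirely inside the abstract elt type. *)
  let f x =
    A.elt_to_float
      (A.Scalar.add (A.Scalar.sigmoid (A.float_to_elt x)) (A.float_to_elt 1.))
end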
\ No newline at end of file diff --git a/owl-base/Owl_computation_symbol/Make/argument-1-Shape/Type/Device/A/index.html b/owl-base/Owl_computation_symbol/Make/argument-1-Shape/Type/Device/A/index.html deleted file mode 100644 index 6aeafc6ec..000000000 --- a/owl-base/Owl_computation_symbol/Make/argument-1-Shape/Type/Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_symbol.Make.Shape.Type.Device.A)

Module Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_symbol/Make/argument-1-Shape/Type/Device/index.html b/owl-base/Owl_computation_symbol/Make/argument-1-Shape/Type/Device/index.html deleted file mode 100644 index 91a938a2e..000000000 --- a/owl-base/Owl_computation_symbol/Make/argument-1-Shape/Type/Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_computation_symbol.Make.Shape.Type.Device)

Module Type.Device

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_symbol/Make/argument-1-Shape/Type/index.html b/owl-base/Owl_computation_symbol/Make/argument-1-Shape/Type/index.html deleted file mode 100644 index 2d3e3b5d9..000000000 --- a/owl-base/Owl_computation_symbol/Make/argument-1-Shape/Type/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Type (owl-base.Owl_computation_symbol.Make.Shape.Type)

Module Shape.Type

Type definition
type state =
  1. | Valid
  2. | Invalid
    (*

    TODO

    *)

TODO

and block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}

The block type keeps a reference to a block of memory and to the nodes sharing that block.

and attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}

TODO

and arr =
  1. | Arr of t
and elt =
  1. | Elt of t
and op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
    (*

    TODO

    *)
\ No newline at end of file diff --git a/owl-base/Owl_computation_symbol/Make/argument-1-Shape/index.html b/owl-base/Owl_computation_symbol/Make/argument-1-Shape/index.html deleted file mode 100644 index fb5eb0a86..000000000 --- a/owl-base/Owl_computation_symbol/Make/argument-1-Shape/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Shape (owl-base.Owl_computation_symbol.Make.Shape)

Parameter Make.Shape

Core functions
val infer_shape : Type.op -> Type.attr Owl_graph.node array -> int array option array

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_symbol/Make/index.html b/owl-base/Owl_computation_symbol/Make/index.html deleted file mode 100644 index a79f870c2..000000000 --- a/owl-base/Owl_computation_symbol/Make/index.html +++ /dev/null @@ -1,19 +0,0 @@ - -Make (owl-base.Owl_computation_symbol.Make)

Module Owl_computation_symbol.Make

Parameters

Signature

module Shape = Shape
val op_to_str : Shape.Type.op -> string
val is_random_variable : Shape.Type.op -> bool
val refnum : 'a Owl_graph.node -> int
val node_shape : Shape.Type.attr Owl_graph.node -> int array
val node_numel : Shape.Type.attr Owl_graph.node -> int
val is_shape_unknown : Shape.Type.attr Owl_graph.node -> bool
val infer_shape_graph : Shape.Type.attr Owl_graph.node array -> unit
val shape_to_str : int array option array -> string
val node_to_str : Shape.Type.attr Owl_graph.node -> string
val node_to_arr : Shape.Type.t -> Shape.Type.arr
val arr_to_node : Shape.Type.arr -> Shape.Type.t
val node_to_elt : Shape.Type.t -> Shape.Type.elt
val elt_to_node : Shape.Type.elt -> Shape.Type.t
val new_block_id : unit -> int
val make_empty_block : ?block_id:int -> int -> Shape.Type.block
val make_value_block : Shape.Type.Device.value -> Shape.Type.t -> unit
val make_node : ?name:string -> ?value:Shape.Type.Device.value array -> ?shape:int array option array -> ?freeze:bool -> ?reuse:bool -> ?state:Shape.Type.state -> Shape.Type.op -> Shape.Type.attr Owl_graph.node
val make_then_connect : ?shape:int array option array -> Shape.Type.op -> Shape.Type.attr Owl_graph.node array -> Shape.Type.attr Owl_graph.node
val var_arr : ?shape:int array -> string -> Shape.Type.arr
val var_elt : string -> Shape.Type.elt
val const_arr : string -> Shape.Type.Device.A.arr -> Shape.Type.arr
val const_elt : string -> Shape.Type.Device.A.elt -> Shape.Type.elt
val get_nodes_using_block : Shape.Type.block -> Shape.Type.t list
val _get_value_block : Shape.Type.block -> Shape.Type.Device.value
val get_block_opt : Shape.Type.attr Owl_graph.node -> Shape.Type.block array option
val _set_block : Shape.Type.attr Owl_graph.node -> Shape.Type.block array -> unit
val add_node_to_block : Shape.Type.t -> Shape.Type.block -> unit
val get_active_node : Shape.Type.block -> Shape.Type.t option
val set_active_node : Shape.Type.block -> Shape.Type.t -> unit
val get_block_id : Shape.Type.attr Owl_graph.node -> int
val set_value : Shape.Type.t -> Shape.Type.Device.value array -> unit
val set_operator : Shape.Type.attr Owl_graph.node -> Shape.Type.op -> unit
val set_reuse : Shape.Type.attr Owl_graph.node -> bool -> unit
val get_reuse : Shape.Type.attr Owl_graph.node -> bool
val is_shared : Shape.Type.attr Owl_graph.node -> bool
val get_shared_nodes : Shape.Type.t -> Shape.Type.t array
val is_var : Shape.Type.attr Owl_graph.node -> bool
val is_const : Shape.Type.attr Owl_graph.node -> bool
val is_node_arr : Shape.Type.attr Owl_graph.node -> bool
val is_node_elt : Shape.Type.attr Owl_graph.node -> bool
val is_assigned : Shape.Type.attr Owl_graph.node -> bool
val check_assigned : Shape.Type.attr Owl_graph.node -> unit
val is_valid : Shape.Type.attr Owl_graph.node -> bool
val validate : Shape.Type.attr Owl_graph.node -> unit
val invalidate : Shape.Type.attr Owl_graph.node -> unit
val invalidate_graph : Shape.Type.attr Owl_graph.node -> unit
val is_freeze : Shape.Type.attr Owl_graph.node -> bool
val freeze : Shape.Type.attr Owl_graph.node -> unit
val freeze_descendants : Shape.Type.attr Owl_graph.node array -> unit
val freeze_ancestors : Shape.Type.attr Owl_graph.node array -> unit
val unsafe_assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit
val assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit
val assign_elt : Shape.Type.elt -> Shape.Type.Device.A.elt -> unit
val float_to_elt : float -> Shape.Type.elt
val elt_to_float : Shape.Type.elt -> float
\ No newline at end of file diff --git a/owl-base/Owl_computation_symbol_sig/.dummy b/owl-base/Owl_computation_symbol_sig/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_computation_symbol_sig/module-type-Sig/Shape/Type/Device/A/Linalg/index.html b/owl-base/Owl_computation_symbol_sig/module-type-Sig/Shape/Type/Device/A/Linalg/index.html deleted file mode 100644 index 9a4db0cd6..000000000 --- a/owl-base/Owl_computation_symbol_sig/module-type-Sig/Shape/Type/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_symbol_sig.Sig.Shape.Type.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_symbol_sig/module-type-Sig/Shape/Type/Device/A/Mat/index.html b/owl-base/Owl_computation_symbol_sig/module-type-Sig/Shape/Type/Device/A/Mat/index.html deleted file mode 100644 index 56d186cfd..000000000 --- a/owl-base/Owl_computation_symbol_sig/module-type-Sig/Shape/Type/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_symbol_sig.Sig.Shape.Type.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_symbol_sig/module-type-Sig/Shape/Type/Device/A/Scalar/index.html b/owl-base/Owl_computation_symbol_sig/module-type-Sig/Shape/Type/Device/A/Scalar/index.html deleted file mode 100644 index 0919bbc05..000000000 --- a/owl-base/Owl_computation_symbol_sig/module-type-Sig/Shape/Type/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_symbol_sig.Sig.Shape.Type.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_computation_symbol_sig/module-type-Sig/Shape/Type/Device/A/index.html b/owl-base/Owl_computation_symbol_sig/module-type-Sig/Shape/Type/Device/A/index.html deleted file mode 100644 index 524f37be2..000000000 --- a/owl-base/Owl_computation_symbol_sig/module-type-Sig/Shape/Type/Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_symbol_sig.Sig.Shape.Type.Device.A)

Module Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_symbol_sig/module-type-Sig/Shape/Type/Device/index.html b/owl-base/Owl_computation_symbol_sig/module-type-Sig/Shape/Type/Device/index.html deleted file mode 100644 index 8f72c8044..000000000 --- a/owl-base/Owl_computation_symbol_sig/module-type-Sig/Shape/Type/Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_computation_symbol_sig.Sig.Shape.Type.Device)

Module Type.Device

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_symbol_sig/module-type-Sig/Shape/Type/index.html b/owl-base/Owl_computation_symbol_sig/module-type-Sig/Shape/Type/index.html deleted file mode 100644 index f9c3c283f..000000000 --- a/owl-base/Owl_computation_symbol_sig/module-type-Sig/Shape/Type/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Type (owl-base.Owl_computation_symbol_sig.Sig.Shape.Type)

Module Shape.Type

Type definition
type state =
  1. | Valid
  2. | Invalid
    (* TODO *)

TODO

and block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}

block type keeps a reference to a block of memory and to the nodes sharing that block.

and attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}

TODO

and arr =
  1. | Arr of t
and elt =
  1. | Elt of t
and op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * int -> elt
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of elt -> elt
  31. | Fold of int * elt -> elt -> elt
  32. | Scan of int * elt -> elt -> elt
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of Device.A.arr -> Device.A.arr
  36. | DelayArray of int array * Device.A.arr array -> Device.A.arr
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
    (* TODO *)
\ No newline at end of file diff --git a/owl-base/Owl_computation_symbol_sig/module-type-Sig/Shape/index.html b/owl-base/Owl_computation_symbol_sig/module-type-Sig/Shape/index.html deleted file mode 100644 index 3c9735950..000000000 --- a/owl-base/Owl_computation_symbol_sig/module-type-Sig/Shape/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Shape (owl-base.Owl_computation_symbol_sig.Sig.Shape)

Module Sig.Shape

Core functions
val infer_shape : Type.op -> Type.attr Owl_graph.node array -> int array option array

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_symbol_sig/module-type-Sig/index.html b/owl-base/Owl_computation_symbol_sig/module-type-Sig/index.html deleted file mode 100644 index 99e419267..000000000 --- a/owl-base/Owl_computation_symbol_sig/module-type-Sig/index.html +++ /dev/null @@ -1,28 +0,0 @@ - -Sig (owl-base.Owl_computation_symbol_sig.Sig)

Module type Owl_computation_symbol_sig.Sig

Core functions
val op_to_str : Shape.Type.op -> string

TODO

val is_random_variable : Shape.Type.op -> bool

TODO

val refnum : 'a Owl_graph.node -> int

TODO

val node_shape : Shape.Type.attr Owl_graph.node -> int array

TODO

val node_numel : Shape.Type.attr Owl_graph.node -> int

TODO

val is_shape_unknown : Shape.Type.attr Owl_graph.node -> bool

TODO

val infer_shape_graph : Shape.Type.attr Owl_graph.node array -> unit

TODO

val shape_to_str : int array option array -> string

TODO

val node_to_str : Shape.Type.attr Owl_graph.node -> string

TODO

val node_to_arr : Shape.Type.t -> Shape.Type.arr

TODO

val arr_to_node : Shape.Type.arr -> Shape.Type.t

TODO

val node_to_elt : Shape.Type.t -> Shape.Type.elt

TODO

val elt_to_node : Shape.Type.elt -> Shape.Type.t

TODO

val make_node : ?name:string -> ?value:Shape.Type.Device.value array -> ?shape:int array option array -> ?freeze:bool -> ?reuse:bool -> ?state:Shape.Type.state -> Shape.Type.op -> Shape.Type.attr Owl_graph.node

TODO

val make_then_connect : ?shape:int array option array -> Shape.Type.op -> Shape.Type.attr Owl_graph.node array -> Shape.Type.attr Owl_graph.node

TODO

val var_arr : ?shape:int array -> string -> Shape.Type.arr

TODO

val var_elt : string -> Shape.Type.elt

TODO

val const_arr : string -> Shape.Type.Device.A.arr -> Shape.Type.arr

TODO

val const_elt : string -> Shape.Type.Device.A.elt -> Shape.Type.elt

TODO

val new_block_id : unit -> int

new_block_id () returns an unused block id.

val make_empty_block : ?block_id:int -> int -> Shape.Type.block

make_empty_block s returns an empty block of memory of size s.

val make_value_block : Shape.Type.Device.value -> Shape.Type.attr Owl_graph.node -> unit

make_value_block value node creates a block of memory initialised with value and links the new block to node.

get_block node returns the memory block allocated to node. If no block is allocated, throws an exception.

val add_node_to_block : Shape.Type.attr Owl_graph.node -> Shape.Type.block -> unit

Links a node to a reusable block and initialises the node's memory within the block's memory.

val get_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node option

Return the node that is currently using the memory of the block.

val set_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node -> unit

Update the node that is currently using the block of memory.

val get_block_id : Shape.Type.attr Owl_graph.node -> int

get_block_id node returns the id of the block assigned to node. If node has not been assigned yet, returns -1.
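
The block-management functions above are easiest to read together. The sketch below is not part of the original documentation: it assumes a hypothetical module S that satisfies this signature (for example, one produced by applying Owl_computation_symbol.Make to a device module), and it assumes Shape.Type.t and Shape.Type.attr Owl_graph.node are the same type, as they are in Owl's implementation; the variable names, shapes, and block size are invented for illustration.

(* Hedged sketch: S is a hypothetical module satisfying Sig. *)
let _block_sketch () =
  let x = S.arr_to_node (S.var_arr ~shape:[| 10; 10 |] "x") in
  let y = S.arr_to_node (S.var_arr ~shape:[| 10; 10 |] "y") in
  (* reserve a fresh block id and an empty block of 100 elements *)
  let id  = S.new_block_id () in
  let blk = S.make_empty_block ~block_id:id 100 in
  (* register both nodes on the same block so they share its memory *)
  S.add_node_to_block x blk;
  S.add_node_to_block y blk;
  (* each node now reports the block's id (it would report -1 if unassigned) *)
  Printf.printf "x -> block %d, y -> block %d\n" (S.get_block_id x) (S.get_block_id y);
  (* query, then update, which node currently owns the block's memory *)
  (match S.get_active_node blk with
   | Some n -> print_endline ("active: " ^ S.node_to_str n)
   | None   -> print_endline "block currently has no active node");
  S.set_active_node blk x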

val set_value : Shape.Type.attr Owl_graph.node -> Shape.Type.Device.value array -> unit

TODO

val set_operator : Shape.Type.attr Owl_graph.node -> Shape.Type.op -> unit

TODO

TODO

val set_reuse : Shape.Type.attr Owl_graph.node -> bool -> unit

TODO

val get_reuse : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_shared : Shape.Type.attr Owl_graph.node -> bool

TODO

val get_shared_nodes : Shape.Type.attr Owl_graph.node -> Shape.Type.attr Owl_graph.node array

get_shared_nodes node returns the nodes sharing the same block of memory as node.
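
A short illustrative use, reusing the hypothetical module S from the sketch above:

(* Print every node that shares node's memory block; node_to_str comes
   from this signature, Array.iter from the OCaml standard library. *)
let print_sharing node =
  S.get_shared_nodes node
  |> Array.iter (fun n -> print_endline (S.node_to_str n))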

val is_var : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_const : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_arr : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_elt : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_assigned : Shape.Type.attr Owl_graph.node -> bool

is_assigned node checks if a block of memory has been assigned to node.

val check_assigned : Shape.Type.attr Owl_graph.node -> unit

check_assigned node throws an exception if node has not been assigned to a block.
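
The two functions are typically used together; a minimal sketch follows (same hypothetical module S as above, helper names invented here):

(* Option-returning variant: guard with is_assigned before reading. *)
let block_id_opt node =
  if S.is_assigned node then Some (S.get_block_id node) else None

(* Exception-raising variant: let check_assigned fail loudly instead. *)
let block_id_exn node =
  S.check_assigned node;
  S.get_block_id node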

val is_valid : Shape.Type.attr Owl_graph.node -> bool

TODO

val validate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate_graph : Shape.Type.attr Owl_graph.node -> unit

TODO

val is_freeze : Shape.Type.attr Owl_graph.node -> bool

TODO

val freeze : Shape.Type.attr Owl_graph.node -> unit

TODO

val freeze_descendants : Shape.Type.attr Owl_graph.node array -> unit

TODO

val freeze_ancestors : Shape.Type.attr Owl_graph.node array -> unit

TODO

TODO

TODO

val unsafe_assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_elt : Shape.Type.elt -> Shape.Type.Device.A.elt -> unit

TODO

val float_to_elt : float -> Shape.Type.elt

TODO

val elt_to_float : Shape.Type.elt -> float

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_type/.dummy b/owl-base/Owl_computation_type/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_computation_type/Make/argument-1-Device/A/Linalg/index.html b/owl-base/Owl_computation_type/Make/argument-1-Device/A/Linalg/index.html deleted file mode 100644 index 2272fd2c9..000000000 --- a/owl-base/Owl_computation_type/Make/argument-1-Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_type.Make.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_type/Make/argument-1-Device/A/Mat/index.html b/owl-base/Owl_computation_type/Make/argument-1-Device/A/Mat/index.html deleted file mode 100644 index 0001f330e..000000000 --- a/owl-base/Owl_computation_type/Make/argument-1-Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_type.Make.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_type/Make/argument-1-Device/A/Scalar/index.html b/owl-base/Owl_computation_type/Make/argument-1-Device/A/Scalar/index.html deleted file mode 100644 index 5bf868c2d..000000000 --- a/owl-base/Owl_computation_type/Make/argument-1-Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_type.Make.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_computation_type/Make/argument-1-Device/A/index.html b/owl-base/Owl_computation_type/Make/argument-1-Device/A/index.html deleted file mode 100644 index d05da981f..000000000 --- a/owl-base/Owl_computation_type/Make/argument-1-Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_type.Make.Device.A)

Module Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_type/Make/argument-1-Device/index.html b/owl-base/Owl_computation_type/Make/argument-1-Device/index.html deleted file mode 100644 index 8924af543..000000000 --- a/owl-base/Owl_computation_type/Make/argument-1-Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_computation_type.Make.Device)

Parameter Make.Device

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_type/Make/index.html b/owl-base/Owl_computation_type/Make/index.html deleted file mode 100644 index 90901957c..000000000 --- a/owl-base/Owl_computation_type/Make/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Make (owl-base.Owl_computation_type.Make)

Module Owl_computation_type.Make

Parameters

Signature

module Device = Device
type state =
  1. | Valid
  2. | Invalid
and block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}
and attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}
and arr =
  1. | Arr of t
and elt =
  1. | Elt of t
and op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types.index list
  18. | SetFancy of Owl_types.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types.padding * int array
  114. | Conv2d of Owl_types.padding * int array
  115. | Conv3d of Owl_types.padding * int array
  116. | TransposeConv1d of Owl_types.padding * int array
  117. | TransposeConv2d of Owl_types.padding * int array
  118. | TransposeConv3d of Owl_types.padding * int array
  119. | DilatedConv1d of Owl_types.padding * int array * int array
  120. | DilatedConv2d of Owl_types.padding * int array * int array
  121. | DilatedConv3d of Owl_types.padding * int array * int array
  122. | MaxPool1d of Owl_types.padding * int array * int array
  123. | MaxPool2d of Owl_types.padding * int array * int array
  124. | MaxPool3d of Owl_types.padding * int array * int array
  125. | AvgPool1d of Owl_types.padding * int array * int array
  126. | AvgPool2d of Owl_types.padding * int array * int array
  127. | AvgPool3d of Owl_types.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
\ No newline at end of file diff --git a/owl-base/Owl_computation_type_sig/.dummy b/owl-base/Owl_computation_type_sig/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_computation_type_sig/module-type-Sig/Device/A/Linalg/index.html b/owl-base/Owl_computation_type_sig/module-type-Sig/Device/A/Linalg/index.html deleted file mode 100644 index d7ad177b1..000000000 --- a/owl-base/Owl_computation_type_sig/module-type-Sig/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_computation_type_sig.Sig.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_type_sig/module-type-Sig/Device/A/Mat/index.html b/owl-base/Owl_computation_type_sig/module-type-Sig/Device/A/Mat/index.html deleted file mode 100644 index 4216854cc..000000000 --- a/owl-base/Owl_computation_type_sig/module-type-Sig/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_computation_type_sig.Sig.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_computation_type_sig/module-type-Sig/Device/A/Scalar/index.html b/owl-base/Owl_computation_type_sig/module-type-Sig/Device/A/Scalar/index.html deleted file mode 100644 index e0275199b..000000000 --- a/owl-base/Owl_computation_type_sig/module-type-Sig/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_computation_type_sig.Sig.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_computation_type_sig/module-type-Sig/Device/A/index.html b/owl-base/Owl_computation_type_sig/module-type-Sig/Device/A/index.html deleted file mode 100644 index 567821cda..000000000 --- a/owl-base/Owl_computation_type_sig/module-type-Sig/Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_computation_type_sig.Sig.Device.A)

Module Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
\ No newline at end of file diff --git a/owl-base/Owl_computation_type_sig/module-type-Sig/Device/index.html b/owl-base/Owl_computation_type_sig/module-type-Sig/Device/index.html deleted file mode 100644 index b1fe502aa..000000000 --- a/owl-base/Owl_computation_type_sig/module-type-Sig/Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_computation_type_sig.Sig.Device)

Module Sig.Device

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO

\ No newline at end of file diff --git a/owl-base/Owl_computation_type_sig/module-type-Sig/index.html b/owl-base/Owl_computation_type_sig/module-type-Sig/index.html deleted file mode 100644 index 9af718c37..000000000 --- a/owl-base/Owl_computation_type_sig/module-type-Sig/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Sig (owl-base.Owl_computation_type_sig.Sig)

Module type Owl_computation_type_sig.Sig

Type definition
type state =
  1. | Valid
  2. | Invalid
    (*

    TODO

    *)

TODO

and block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}

The block type keeps a reference to a block of memory and to the nodes sharing that block; a simplified, self-contained sketch of such a record follows this type definition.

and attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}

TODO

and arr =
  1. | Arr of t
and elt =
  1. | Elt of t
and op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
    (*

    TODO

    *)
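
The block record mentioned above is easiest to read with a concrete value in hand. The following is a minimal, self-contained OCaml sketch using hypothetical stand-in types (a float array in place of Device.value, and a toy node record in place of the graph node type t); it is not the real Owl implementation, only an illustration of one memory buffer shared by several nodes.

(* Hypothetical stand-ins: [value] plays the role of [Device.value]
   and [node] plays the role of the graph node type [t]. *)
type value = float array

type node = { id : int; mutable output : value option }

type block = {
  size : int;                    (* capacity of the shared buffer *)
  block_id : int;
  mutable active : node option;  (* node currently writing into the buffer *)
  mutable memory : value;        (* the shared memory itself *)
  mutable nodes : node list;     (* every node allowed to reuse this buffer *)
}

let () =
  let buf = Array.make 4 0. in
  let n1 = { id = 1; output = None } in
  let n2 = { id = 2; output = None } in
  (* One block of four elements shared by nodes 1 and 2. *)
  let b = { size = 4; block_id = 0; active = Some n1; memory = buf; nodes = [ n1; n2 ] } in
  Printf.printf "block %d shared by %d nodes\n" b.block_id (List.length b.nodes)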
\ No newline at end of file diff --git a/owl-base/Owl_const/.dummy b/owl-base/Owl_const/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_const/CGS/index.html b/owl-base/Owl_const/CGS/index.html deleted file mode 100644 index 2eb90bb32..000000000 --- a/owl-base/Owl_const/CGS/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -CGS (owl-base.Owl_const.CGS)

Module Owl_const.CGS

val speed_of_light : float

speed_of_light = 2.99792458e10

val gravitational_constant : float

gravitational_constant = 6.673e-8

val plancks_constant_h : float

plancks_constant_h = 6.62606896e-27

val plancks_constant_hbar : float

plancks_constant_hbar = 1.05457162825e-27

val astronomical_unit : float

astronomical_unit = 1.49597870691e13

val light_year : float

light_year = 9.46053620707e17

val parsec : float

parsec = 3.08567758135e18

val grav_accel : float

grav_accel = 9.80665e2

val electron_volt : float

electron_volt = 1.602176487e-12

val mass_electron : float

mass_electron = 9.10938188e-28

val mass_muon : float

mass_muon = 1.88353109e-25

val mass_proton : float

mass_proton = 1.67262158e-24

val mass_neutron : float

mass_neutron = 1.67492716e-24

val rydberg : float

rydberg = 2.17987196968e-11

val boltzmann : float

boltzmann = 1.3806504e-16

val molar_gas : float

molar_gas = 8.314472e7

val standard_gas_volume : float

standard_gas_volume = 2.2710981e4

val minute : float

minute = 6e1

val hour : float

hour = 3.6e3

val day : float

day = 8.64e4

val week : float

week = 6.048e5

val inch : float

inch = 2.54e0

val foot : float

foot = 3.048e1

val yard : float

yard = 9.144e1

val mile : float

mile = 1.609344e5

val nautical_mile : float

nautical_mile = 1.852e5

val fathom : float

fathom = 1.8288e2

val mil : float

mil = 2.54e-3

val point : float

point = 3.52777777778e-2

val texpoint : float

texpoint = 3.51459803515e-2

val micron : float

micron = 1e-4

val angstrom : float

angstrom = 1e-8

val hectare : float

hectare = 1e8

val acre : float

acre = 4.04685642241e7

val barn : float

barn = 1e-24

val liter : float

liter = 1e3

val us_gallon : float

us_gallon = 3.78541178402e3

val quart : float

quart = 9.46352946004e2

val pint : float

pint = 4.73176473002e2

val cup : float

cup = 2.36588236501e2

val fluid_ounce : float

fluid_ounce = 2.95735295626e1

val tablespoon : float

tablespoon = 1.47867647813e1

val teaspoon : float

teaspoon = 4.92892159375e0

val canadian_gallon : float

canadian_gallon = 4.54609e3

val uk_gallon : float

uk_gallon = 4.546092e3

val miles_per_hour : float

miles_per_hour = 4.4704e1

val kilometers_per_hour : float

kilometers_per_hour = 2.77777777778e1

val knot : float

knot = 5.14444444444e1

val pound_mass : float

pound_mass = 4.5359237e2

val ounce_mass : float

ounce_mass = 2.8349523125e1

val ton : float

ton = 9.0718474e5

val metric_ton : float

metric_ton = 1e6

val uk_ton : float

uk_ton = 1.0160469088e6

val troy_ounce : float

troy_ounce = 3.1103475e1

val carat : float

carat = 2e-1

val unified_atomic_mass : float

unified_atomic_mass = 1.660538782e-24

val gram_force : float

gram_force = 9.80665e2

val pound_force : float

pound_force = 4.44822161526e5

val kilopound_force : float

kilopound_force = 4.44822161526e8

val poundal : float

poundal = 1.38255e4

val calorie : float

calorie = 4.1868e7

val btu : float

btu = 1.05505585262e10

val therm : float

therm = 1.05506e15

val horsepower : float

horsepower = 7.457e9

val bar : float

bar = 1e6

val std_atmosphere : float

std_atmosphere = 1.01325e6

val torr : float

torr = 1.33322368421e3

val meter_of_mercury : float

meter_of_mercury = 1.33322368421e6

val inch_of_mercury : float

inch_of_mercury = 3.38638815789e4

val inch_of_water : float

inch_of_water = 2.490889e3

val psi : float

psi = 6.89475729317e4

val poise : float

poise = 1e0

val stokes : float

stokes = 1e0

val stilb : float

stilb = 1e0

val lumen : float

lumen = 1e0

val lux : float

lux = 1e-4

val phot : float

phot = 1e0

val footcandle : float

footcandle = 1.076e-3

val lambert : float

lambert = 1e0

val footlambert : float

footlambert = 1.07639104e-3

val curie : float

curie = 3.7e10

val roentgen : float

roentgen = 2.58e-7

val rad : float

rad = 1e2

val solar_mass : float

solar_mass = 1.98892e33

val bohr_radius : float

bohr_radius = 5.291772083e-9

val newton : float

newton = 1e5

val dyne : float

dyne = 1e0

val joule : float

joule = 1e7

val erg : float

erg = 1e0

val stefan_boltzmann_constant : float

stefan_boltzmann_constant = 5.67040047374e-5

val thomson_cross_section : float

thomson_cross_section = 6.65245893699e-25

\ No newline at end of file diff --git a/owl-base/Owl_const/CGSM/index.html b/owl-base/Owl_const/CGSM/index.html deleted file mode 100644 index 7c77d1b36..000000000 --- a/owl-base/Owl_const/CGSM/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -CGSM (owl-base.Owl_const.CGSM)

Module Owl_const.CGSM

val speed_of_light : float

speed_of_light = 2.99792458e10

val gravitational_constant : float

gravitational_constant = 6.673e-8

val plancks_constant_h : float

plancks_constant_h = 6.62606896e-27

val plancks_constant_hbar : float

plancks_constant_hbar = 1.05457162825e-27

val astronomical_unit : float

astronomical_unit = 1.49597870691e13

val light_year : float

light_year = 9.46053620707e17

val parsec : float

parsec = 3.08567758135e18

val grav_accel : float

grav_accel = 9.80665e2

val electron_volt : float

electron_volt = 1.602176487e-12

val mass_electron : float

mass_electron = 9.10938188e-28

val mass_muon : float

mass_muon = 1.88353109e-25

val mass_proton : float

mass_proton = 1.67262158e-24

val mass_neutron : float

mass_neutron = 1.67492716e-24

val rydberg : float

rydberg = 2.17987196968e-11

val boltzmann : float

boltzmann = 1.3806504e-16

val molar_gas : float

molar_gas = 8.314472e7

val standard_gas_volume : float

standard_gas_volume = 2.2710981e4

val minute : float

minute = 6e1

val hour : float

hour = 3.6e3

val day : float

day = 8.64e4

val week : float

week = 6.048e5

val inch : float

inch = 2.54e0

val foot : float

foot = 3.048e1

val yard : float

yard = 9.144e1

val mile : float

mile = 1.609344e5

val nautical_mile : float

nautical_mile = 1.852e5

val fathom : float

fathom = 1.8288e2

val mil : float

mil = 2.54e-3

val point : float

point = 3.52777777778e-2

val texpoint : float

texpoint = 3.51459803515e-2

val micron : float

micron = 1e-4

val angstrom : float

angstrom = 1e-8

val hectare : float

hectare = 1e8

val acre : float

acre = 4.04685642241e7

val barn : float

barn = 1e-24

val liter : float

liter = 1e3

val us_gallon : float

us_gallon = 3.78541178402e3

val quart : float

quart = 9.46352946004e2

val pint : float

pint = 4.73176473002e2

val cup : float

cup = 2.36588236501e2

val fluid_ounce : float

fluid_ounce = 2.95735295626e1

val tablespoon : float

tablespoon = 1.47867647813e1

val teaspoon : float

teaspoon = 4.92892159375e0

val canadian_gallon : float

canadian_gallon = 4.54609e3

val uk_gallon : float

uk_gallon = 4.546092e3

val miles_per_hour : float

miles_per_hour = 4.4704e1

val kilometers_per_hour : float

kilometers_per_hour = 2.77777777778e1

val knot : float

knot = 5.14444444444e1

val pound_mass : float

pound_mass = 4.5359237e2

val ounce_mass : float

ounce_mass = 2.8349523125e1

val ton : float

ton = 9.0718474e5

val metric_ton : float

metric_ton = 1e6

val uk_ton : float

uk_ton = 1.0160469088e6

val troy_ounce : float

troy_ounce = 3.1103475e1

val carat : float

carat = 2e-1

val unified_atomic_mass : float

unified_atomic_mass = 1.660538782e-24

val gram_force : float

gram_force = 9.80665e2

val pound_force : float

pound_force = 4.44822161526e5

val kilopound_force : float

kilopound_force = 4.44822161526e8

val poundal : float

poundal = 1.38255e4

val calorie : float

calorie = 4.1868e7

val btu : float

btu = 1.05505585262e10

val therm : float

therm = 1.05506e15

val horsepower : float

horsepower = 7.457e9

val bar : float

bar = 1e6

val std_atmosphere : float

std_atmosphere = 1.01325e6

val torr : float

torr = 1.33322368421e3

val meter_of_mercury : float

meter_of_mercury = 1.33322368421e6

val inch_of_mercury : float

inch_of_mercury = 3.38638815789e4

val inch_of_water : float

inch_of_water = 2.490889e3

val psi : float

psi = 6.89475729317e4

val poise : float

poise = 1e0

val stokes : float

stokes = 1e0

val stilb : float

stilb = 1e0

val lumen : float

lumen = 1e0

val lux : float

lux = 1e-4

val phot : float

phot = 1e0

val footcandle : float

footcandle = 1.076e-3

val lambert : float

lambert = 1e0

val footlambert : float

footlambert = 1.07639104e-3

val curie : float

curie = 3.7e10

val roentgen : float

roentgen = 2.58e-8

val rad : float

rad = 1e2

val solar_mass : float

solar_mass = 1.98892e33

val bohr_radius : float

bohr_radius = 5.291772083e-9

val newton : float

newton = 1e5

val dyne : float

dyne = 1e0

val joule : float

joule = 1e7

val erg : float

erg = 1e0

val stefan_boltzmann_constant : float

stefan_boltzmann_constant = 5.67040047374e-5

val thomson_cross_section : float

thomson_cross_section = 6.65245893699e-25

val bohr_magneton : float

bohr_magneton = 9.27400899e-21

val nuclear_magneton : float

nuclear_magneton = 5.05078317e-24

val electron_magnetic_moment : float

electron_magnetic_moment = 9.28476362e-21

val proton_magnetic_moment : float

proton_magnetic_moment = 1.410606633e-23

val faraday : float

faraday = 9.64853429775e3

val electron_charge : float

electron_charge = 1.602176487e-20

\ No newline at end of file diff --git a/owl-base/Owl_const/MKS/index.html b/owl-base/Owl_const/MKS/index.html deleted file mode 100644 index 582899bb9..000000000 --- a/owl-base/Owl_const/MKS/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -MKS (owl-base.Owl_const.MKS)

Module Owl_const.MKS

val speed_of_light : float

speed_of_light = 2.99792458e8

val gravitational_constant : float

gravitational_constant = 6.673e-11

val plancks_constant_h : float

plancks_constant_h = 6.62606896e-34

val plancks_constant_hbar : float

plancks_constant_hbar = 1.05457162825e-34

val astronomical_unit : float

astronomical_unit = 1.49597870691e11

val light_year : float

light_year = 9.46053620707e15

val parsec : float

parsec = 3.08567758135e16

val grav_accel : float

grav_accel = 9.80665e0

val electron_volt : float

electron_volt = 1.602176487e-19

val mass_electron : float

mass_electron = 9.10938188e-31

val mass_muon : float

mass_muon = 1.88353109e-28

val mass_proton : float

mass_proton = 1.67262158e-27

val mass_neutron : float

mass_neutron = 1.67492716e-27

val rydberg : float

rydberg = 2.17987196968e-18

val boltzmann : float

boltzmann = 1.3806504e-23

val molar_gas : float

molar_gas = 8.314472e0

val standard_gas_volume : float

standard_gas_volume = 2.2710981e-2

val minute : float

minute = 6e1

val hour : float

hour = 3.6e3

val day : float

day = 8.64e4

val week : float

week = 6.048e5

val inch : float

inch = 2.54e-2

val foot : float

foot = 3.048e-1

val yard : float

yard = 9.144e-1

val mile : float

mile = 1.609344e3

val nautical_mile : float

nautical_mile = 1.852e3

val fathom : float

fathom = 1.8288e0

val mil : float

mil = 2.54e-5

val point : float

point = 3.52777777778e-4

val texpoint : float

texpoint = 3.51459803515e-4

val micron : float

micron = 1e-6

val angstrom : float

angstrom = 1e-10

val hectare : float

hectare = 1e4

val acre : float

acre = 4.04685642241e3

val barn : float

barn = 1e-28

val liter : float

liter = 1e-3

val us_gallon : float

us_gallon = 3.78541178402e-3

val quart : float

quart = 9.46352946004e-4

val pint : float

pint = 4.73176473002e-4

val cup : float

cup = 2.36588236501e-4

val fluid_ounce : float

fluid_ounce = 2.95735295626e-5

val tablespoon : float

tablespoon = 1.47867647813e-5

val teaspoon : float

teaspoon = 4.92892159375e-6

val canadian_gallon : float

canadian_gallon = 4.54609e-3

val uk_gallon : float

uk_gallon = 4.546092e-3

val miles_per_hour : float

miles_per_hour = 4.4704e-1

val kilometers_per_hour : float

kilometers_per_hour = 2.77777777778e-1

val knot : float

knot = 5.14444444444e-1

val pound_mass : float

pound_mass = 4.5359237e-1

val ounce_mass : float

ounce_mass = 2.8349523125e-2

val ton : float

ton = 9.0718474e2

val metric_ton : float

metric_ton = 1e3

val uk_ton : float

uk_ton = 1.0160469088e3

val troy_ounce : float

troy_ounce = 3.1103475e-2

val carat : float

carat = 2e-4

val unified_atomic_mass : float

unified_atomic_mass = 1.660538782e-27

val gram_force : float

gram_force = 9.80665e-3

val pound_force : float

pound_force = 4.44822161526e0

val kilopound_force : float

kilopound_force = 4.44822161526e3

val poundal : float

poundal = 1.38255e-1

val calorie : float

calorie = 4.1868e0

val btu : float

btu = 1.05505585262e3

val therm : float

therm = 1.05506e8

val horsepower : float

horsepower = 7.457e2

val bar : float

bar = 1e5

val std_atmosphere : float

std_atmosphere = 1.01325e5

val torr : float

torr = 1.33322368421e2

val meter_of_mercury : float

meter_of_mercury = 1.33322368421e5

val inch_of_mercury : float

inch_of_mercury = 3.38638815789e3

val inch_of_water : float

inch_of_water = 2.490889e2

val psi : float

psi = 6.89475729317e3

val poise : float

poise = 1e-1

val stokes : float

stokes = 1e-4

val stilb : float

stilb = 1e4

val lumen : float

lumen = 1e0

val lux : float

lux = 1e0

val phot : float

phot = 1e4

val footcandle : float

footcandle = 1.076e1

val lambert : float

lambert = 1e4

val footlambert : float

footlambert = 1.07639104e1

val curie : float

curie = 3.7e10

val roentgen : float

roentgen = 2.58e-4

val rad : float

rad = 1e-2

val solar_mass : float

solar_mass = 1.98892e30

val bohr_radius : float

bohr_radius = 5.291772083e-11

val newton : float

newton = 1e0

val dyne : float

dyne = 1e-5

val joule : float

joule = 1e0

val erg : float

erg = 1e-7

val stefan_boltzmann_constant : float

stefan_boltzmann_constant = 5.67040047374e-8

val thomson_cross_section : float

thomson_cross_section = 6.65245893699e-29

val bohr_magneton : float

bohr_magneton = 9.27400899e-24

val nuclear_magneton : float

nuclear_magneton = 5.05078317e-27

val electron_magnetic_moment : float

electron_magnetic_moment = 9.28476362e-24

val proton_magnetic_moment : float

proton_magnetic_moment = 1.410606633e-26

val faraday : float

faraday = 9.64853429775e4

val electron_charge : float

electron_charge = 1.602176487e-19

val vacuum_permittivity : float

vacuum_permittivity = 8.854187817e-12

val vacuum_permeability : float

vacuum_permeability = 1.25663706144e-6

val debye : float

debye = 3.33564095198e-30

val gauss : float

gauss = 1e-4

\ No newline at end of file diff --git a/owl-base/Owl_const/Prefix/index.html b/owl-base/Owl_const/Prefix/index.html deleted file mode 100644 index 6cc0f5893..000000000 --- a/owl-base/Owl_const/Prefix/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Prefix (owl-base.Owl_const.Prefix)

Module Owl_const.Prefix

val fine_structure : float

fine_structure = 7.297352533e-3

val avogadro : float

avogadro = 6.02214199e23

val yotta : float

yotta = 1e24

val zetta : float

zetta = 1e21

val exa : float

exa = 1e18

val peta : float

peta = 1e15

val tera : float

tera = 1e12

val giga : float

giga = 1e9

val mega : float

mega = 1e6

val kilo : float

kilo = 1e3

val hecto : float

hecto = 1e2

val deca : float

deca = 1e1

val deci : float

deci = 1e-1

val centi : float

centi = 1e-2

val milli : float

milli = 1e-3

val micro : float

micro = 1e-6

val nano : float

nano = 1e-9

val pico : float

pico = 1e-12

val femto : float

femto = 1e-15

val atto : float

atto = 1e-18

val zepto : float

zepto = 1e-21

val yocto : float

yocto = 1e-24

\ No newline at end of file diff --git a/owl-base/Owl_const/SI/index.html b/owl-base/Owl_const/SI/index.html deleted file mode 100644 index 1db233f47..000000000 --- a/owl-base/Owl_const/SI/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -SI (owl-base.Owl_const.SI)

Module Owl_const.SI

val speed_of_light : float

speed_of_light = 2.99792458e8

val gravitational_constant : float

gravitational_constant = 6.673e-11

val plancks_constant_h : float

plancks_constant_h = 6.62606896e-34

val plancks_constant_hbar : float

plancks_constant_hbar = 1.05457162825e-34

val astronomical_unit : float

astronomical_unit = 1.49597870691e11

val light_year : float

light_year = 9.46053620707e15

val parsec : float

parsec = 3.08567758135e16

val grav_accel : float

grav_accel = 9.80665e0

val electron_volt : float

electron_volt = 1.602176487e-19

val mass_electron : float

mass_electron = 9.10938188e-31

val mass_muon : float

mass_muon = 1.88353109e-28

val mass_proton : float

mass_proton = 1.67262158e-27

val mass_neutron : float

mass_neutron = 1.67492716e-27

val rydberg : float

rydberg = 2.17987196968e-18

val boltzmann : float

boltzmann = 1.3806504e-23

val molar_gas : float

molar_gas = 8.314472e0

val standard_gas_volume : float

standard_gas_volume = 2.2710981e-2

val minute : float

minute = 6e1

val hour : float

hour = 3.6e3

val day : float

day = 8.64e4

val week : float

week = 6.048e5

val inch : float

inch = 2.54e-2

val foot : float

foot = 3.048e-1

val yard : float

yard = 9.144e-1

val mile : float

mile = 1.609344e3

val nautical_mile : float

nautical_mile = 1.852e3

val fathom : float

fathom = 1.8288e0

val mil : float

mil = 2.54e-5

val point : float

point = 3.52777777778e-4

val texpoint : float

texpoint = 3.51459803515e-4

val micron : float

micron = 1e-6

val angstrom : float

angstrom = 1e-10

val hectare : float

hectare = 1e4

val acre : float

acre = 4.04685642241e3

val barn : float

barn = 1e-28

val liter : float

liter = 1e-3

val us_gallon : float

us_gallon = 3.78541178402e-3

val quart : float

quart = 9.46352946004e-4

val pint : float

pint = 4.73176473002e-4

val cup : float

cup = 2.36588236501e-4

val fluid_ounce : float

fluid_ounce = 2.95735295626e-5

val tablespoon : float

tablespoon = 1.47867647813e-5

val teaspoon : float

teaspoon = 4.92892159375e-6

val canadian_gallon : float

canadian_gallon = 4.54609e-3

val uk_gallon : float

uk_gallon = 4.546092e-3

val miles_per_hour : float

miles_per_hour = 4.4704e-1

val kilometers_per_hour : float

kilometers_per_hour = 2.77777777778e-1

val knot : float

knot = 5.14444444444e-1

val pound_mass : float

pound_mass = 4.5359237e-1

val ounce_mass : float

ounce_mass = 2.8349523125e-2

val ton : float

ton = 9.0718474e2

val metric_ton : float

metric_ton = 1e3

val uk_ton : float

uk_ton = 1.0160469088e3

val troy_ounce : float

troy_ounce = 3.1103475e-2

val carat : float

carat = 2e-4

val unified_atomic_mass : float

unified_atomic_mass = 1.660538782e-27

val gram_force : float

gram_force = 9.80665e-3

val pound_force : float

pound_force = 4.44822161526e0

val kilopound_force : float

kilopound_force = 4.44822161526e3

val poundal : float

poundal = 1.38255e-1

val calorie : float

calorie = 4.1868e0

val btu : float

btu = 1.05505585262e3

val therm : float

therm = 1.05506e8

val horsepower : float

horsepower = 7.457e2

val bar : float

bar = 1e5

val std_atmosphere : float

std_atmosphere = 1.01325e5

val torr : float

torr = 1.33322368421e2

val meter_of_mercury : float

meter_of_mercury = 1.33322368421e5

val inch_of_mercury : float

inch_of_mercury = 3.38638815789e3

val inch_of_water : float

inch_of_water = 2.490889e2

val psi : float

psi = 6.89475729317e3

val poise : float

poise = 1e-1

val stokes : float

stokes = 1e-4

val stilb : float

stilb = 1e4

val lumen : float

lumen = 1e0

val lux : float

lux = 1e0

val phot : float

phot = 1e4

val footcandle : float

footcandle = 1.076e1

val lambert : float

lambert = 1e4

val footlambert : float

footlambert = 1.07639104e1

val curie : float

curie = 3.7e10

val roentgen : float

roentgen = 2.58e-4

val rad : float

rad = 1e-2

val solar_mass : float

solar_mass = 1.98892e30

val bohr_radius : float

bohr_radius = 5.291772083e-11

val newton : float

newton = 1e0

val dyne : float

dyne = 1e-5

val joule : float

joule = 1e0

val erg : float

erg = 1e-7

val stefan_boltzmann_constant : float

stefan_boltzmann_constant = 5.67040047374e-8

val thomson_cross_section : float

thomson_cross_section = 6.65245893699e-29

val bohr_magneton : float

bohr_magneton = 9.27400899e-24

val nuclear_magneton : float

nuclear_magneton = 5.05078317e-27

val electron_magnetic_moment : float

electron_magnetic_moment = 9.28476362e-24

val proton_magnetic_moment : float

proton_magnetic_moment = 1.410606633e-26

val faraday : float

faraday = 9.64853429775e4

val electron_charge : float

electron_charge = 1.602176487e-19

val vacuum_permittivity : float

vacuum_permittivity = 8.854187817e-12

val vacuum_permeability : float

vacuum_permeability = 1.25663706144e-6

val debye : float

debye = 3.33564095198e-30

val gauss : float

gauss = 1e-4
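
These values are plain OCaml floats expressed in SI units, so unit conversions reduce to ordinary arithmetic. A minimal sketch, assuming the constants live in one of Owl's unit-system modules such as Owl_const.SI (the exact module path is not shown on this page and is an assumption):

module C = Owl_const.SI

let () =
  (* 60 mph expressed in metres per second *)
  Printf.printf "60 mph = %.2f m/s\n" (60. *. C.miles_per_hour);
  (* consistency check: one mile per hour is simply mile / hour *)
  assert (abs_float (C.miles_per_hour -. C.mile /. C.hour) < 1e-9)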

\ No newline at end of file diff --git a/owl-base/Owl_countmin_sketch/.dummy b/owl-base/Owl_countmin_sketch/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_countmin_sketch/Make/argument-1-T/index.html b/owl-base/Owl_countmin_sketch/Make/argument-1-T/index.html deleted file mode 100644 index 436bf85b2..000000000 --- a/owl-base/Owl_countmin_sketch/Make/argument-1-T/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -T (owl-base.Owl_countmin_sketch.Make.T)

Parameter Make.T

Type definition
type t

The type of count-min tables

Core functions
val init : int -> int -> t

init l w generates a table with length l and width w, all counters initialized to 0.

val incr : int -> int -> t -> unit

incr i j t increments the counter at length index i and width index j in table t.

val get : int -> int -> t -> int

get i j t gets the value of the counter at length index i and width index j in table t.

val clone : t -> t

clone t returns a new table with the same contents as t.

val merge : t -> t -> t

merge t1 t2 merges tables t1 and t2 element-wise. If t1 and t2 have the same dimensions, returns a new table whose elements are the sums of corresponding elements from t1 and t2. If dimensions do not match, raises INVALID_ARGUMENT.

\ No newline at end of file diff --git a/owl-base/Owl_countmin_sketch/Make/index.html b/owl-base/Owl_countmin_sketch/Make/index.html deleted file mode 100644 index 60c7f1428..000000000 --- a/owl-base/Owl_countmin_sketch/Make/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Make (owl-base.Owl_countmin_sketch.Make)

Module Owl_countmin_sketch.Make

Parameters

Signature

Type definition
type 'a sketch

The type of Count-Min sketches

Core functions
val init : epsilon:float -> delta:float -> 'a sketch

init epsilon delta initializes a sketch with approximation ratio (1 + epsilon) and failure probability delta.

val incr : 'a sketch -> 'a -> unit

incr s x increments the frequency count of x in sketch s in-place.

val count : 'a sketch -> 'a -> int

count s x returns the estimated frequency of element x in s.

val init_from : 'a sketch -> 'a sketch

init_from s initializes a new empty sketch with the same parameters as s, which can later be merged with s.

val merge : 'a sketch -> 'a sketch -> 'a sketch

merge s1 s2 returns a new sketch whose counts are the sum of those in s1 and s2. Raises INVALID_ARGUMENT if the parameters of s1 and s2 do not match.
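
A minimal usage sketch of this interface. It goes through the Native implementation listed further below, which exposes the same signature for arbitrary values; the parameter choices are illustrative only:

module CM = Owl_countmin_sketch.Native

let () =
  (* epsilon bounds the over-estimate, delta the failure probability *)
  let s = CM.init ~epsilon:0.001 ~delta:0.01 in
  List.iter (CM.incr s) [ "foo"; "bar"; "foo"; "baz"; "foo" ];
  (* the estimate never under-counts, so this prints at least 3 *)
  Printf.printf "foo ~ %d\n" (CM.count s "foo")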

\ No newline at end of file diff --git a/owl-base/Owl_countmin_sketch/Native/index.html b/owl-base/Owl_countmin_sketch/Native/index.html deleted file mode 100644 index ef7bbeabd..000000000 --- a/owl-base/Owl_countmin_sketch/Native/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Native (owl-base.Owl_countmin_sketch.Native)

Module Owl_countmin_sketch.Native

val init : epsilon:float -> delta:float -> 'a sketch
val incr : 'a sketch -> 'a -> unit
val count : 'a sketch -> 'a -> int
val init_from : 'a sketch -> 'a sketch
val merge : 'a sketch -> 'a sketch -> 'a sketch
\ No newline at end of file diff --git a/owl-base/Owl_countmin_sketch/Owl/index.html b/owl-base/Owl_countmin_sketch/Owl/index.html deleted file mode 100644 index a1bc91a38..000000000 --- a/owl-base/Owl_countmin_sketch/Owl/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Owl (owl-base.Owl_countmin_sketch.Owl)

Module Owl_countmin_sketch.Owl

val init : epsilon:float -> delta:float -> 'a sketch
val incr : 'a sketch -> 'a -> unit
val count : 'a sketch -> 'a -> int
val init_from : 'a sketch -> 'a sketch
val merge : 'a sketch -> 'a sketch -> 'a sketch
\ No newline at end of file diff --git a/owl-base/Owl_countmin_sketch_sig/.dummy b/owl-base/Owl_countmin_sketch_sig/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_countmin_sketch_sig/module-type-Sig/index.html b/owl-base/Owl_countmin_sketch_sig/module-type-Sig/index.html deleted file mode 100644 index 5c48a6af3..000000000 --- a/owl-base/Owl_countmin_sketch_sig/module-type-Sig/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Sig (owl-base.Owl_countmin_sketch_sig.Sig)

Module type Owl_countmin_sketch_sig.Sig

Type definition
type 'a sketch

The type of Count-Min sketches

Core functions
val init : epsilon:float -> delta:float -> 'a sketch

init epsilon delta initializes a sketch with approximation ratio (1 + epsilon) and failure probability delta.

val incr : 'a sketch -> 'a -> unit

incr s x increments the frequency count of x in sketch s in-place.

val count : 'a sketch -> 'a -> int

count s x returns the estimated frequency of element x in s.

val init_from : 'a sketch -> 'a sketch

init_from s initializes a new empty sketch with the same parameters as s, which can later be merged with s.

val merge : 'a sketch -> 'a sketch -> 'a sketch

merge s1 s2 returns a new sketch whose counts are the sum of those in s1 and s2. Raises INVALID_ARGUMENT if the parameters of s1 and s2 do not match.
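
For orientation, the textbook Count-Min analysis behind an (epsilon, delta) parameterisation sizes the underlying table and bounds the returned estimate as follows; this is the standard analysis, not a statement about Owl's exact internal dimensioning:

w = \lceil e / \epsilon \rceil, \qquad d = \lceil \ln (1/\delta) \rceil, \qquad
c(x) \le \hat{c}(x) \le c(x) + \epsilon N \quad \text{with probability} \ge 1 - \delta

where N is the total number of increments, c(x) the true frequency of x, and \hat{c}(x) the value returned by count.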

\ No newline at end of file diff --git a/owl-base/Owl_countmin_table/.dummy b/owl-base/Owl_countmin_table/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_countmin_table/Native/index.html b/owl-base/Owl_countmin_table/Native/index.html deleted file mode 100644 index 848dab3be..000000000 --- a/owl-base/Owl_countmin_table/Native/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Native (owl-base.Owl_countmin_table.Native)

Module Owl_countmin_table.Native

Type definition
type t

The type of count-min tables

Core functions
val init : int -> int -> t

init l w generates a table with length l and width w, all counters initialized to 0.

val incr : int -> int -> t -> unit

incr i j t increments the counter at length index i and width index j in table t.

val get : int -> int -> t -> int

get i j t gets the value of the counter at length index i and width index j in table t.

val clone : t -> t

clone t returns a new table with the same contents as t.

val merge : t -> t -> t

merge t1 t2 merges tables t1 and t2 element-wise. If t1 and t2 have the same dimensions, returns a new table whose elements are the sums of corresponding elements from t1 and t2. If dimensions do not match, raises INVALID_ARGUMENT.
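
A minimal sketch of how these low-level table operations compose; the row and column sizes are arbitrary:

module T = Owl_countmin_table.Native

let () =
  let t = T.init 4 1024 in                  (* 4 rows of 1024 counters, all zero *)
  T.incr 0 42 t;
  T.incr 0 42 t;
  assert (T.get 0 42 t = 2);
  let merged = T.merge t (T.clone t) in     (* counters are summed element-wise *)
  assert (T.get 0 42 merged = 4)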

\ No newline at end of file diff --git a/owl-base/Owl_countmin_table/Owl/index.html b/owl-base/Owl_countmin_table/Owl/index.html deleted file mode 100644 index 8b700f3df..000000000 --- a/owl-base/Owl_countmin_table/Owl/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Owl (owl-base.Owl_countmin_table.Owl)

Module Owl_countmin_table.Owl

Type definition
type t

The type of count-min tables

Core functions
val init : int -> int -> t

init l w generates a table with length l and width w, all counters initialized to 0.

val incr : int -> int -> t -> unit

incr i j t increments the counter at length index i and width index j in table t.

val get : int -> int -> t -> int

get i j t gets the value of the counter at length index i and width index j in table t.

val clone : t -> t

clone t returns a new table with the same contents as t.

val merge : t -> t -> t

merge t1 t2 merges tables t1 and t2 element-wise. If t1 and t2 have the same dimensions, returns a new table whose elements are the sums of corresponding elements from t1 and t2. If dimensions do not match, raises INVALID_ARGUMENT.

\ No newline at end of file diff --git a/owl-base/Owl_countmin_table/module-type-Sig/index.html b/owl-base/Owl_countmin_table/module-type-Sig/index.html deleted file mode 100644 index fe6693f47..000000000 --- a/owl-base/Owl_countmin_table/module-type-Sig/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Sig (owl-base.Owl_countmin_table.Sig)

Module type Owl_countmin_table.Sig

Type definition
type t

The type of count-min tables

Core functions
val init : int -> int -> t

init l w generates a table with length l and width w, all counters initialized to 0.

val incr : int -> int -> t -> unit

incr i j t increments the counter at length index i and width index j in table t.

val get : int -> int -> t -> int

get i j t gets the value of the counter at length index i and width index j in table t.

val clone : t -> t

clone t returns a new table with the same contents as t.

val merge : t -> t -> t

merge t1 t2 merges tables t1 and t2 element-wise. If t1 and t2 have the same dimensions, returns a new table whose elements are the sums of corresponding elements from t1 and t2. If dimensions do not match, raises INVALID_ARGUMENT.

\ No newline at end of file diff --git a/owl-base/Owl_dataframe/.dummy b/owl-base/Owl_dataframe/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_exception/.dummy b/owl-base/Owl_exception/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_graph/.dummy b/owl-base/Owl_graph/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_heavyhitters_sketch/.dummy b/owl-base/Owl_heavyhitters_sketch/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_heavyhitters_sketch/Make/argument-1-CM/index.html b/owl-base/Owl_heavyhitters_sketch/Make/argument-1-CM/index.html deleted file mode 100644 index 0d075f1cc..000000000 --- a/owl-base/Owl_heavyhitters_sketch/Make/argument-1-CM/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -CM (owl-base.Owl_heavyhitters_sketch.Make.CM)

Parameter Make.CM

Type definition
type 'a sketch

The type of Count-Min sketches

Core functions
val init : epsilon:float -> delta:float -> 'a sketch

init epsilon delta initializes a sketch with approximation ratio (1 + epsilon) and failure probability delta.

val incr : 'a sketch -> 'a -> unit

incr s x increments the frequency count of x in sketch s in-place.

val count : 'a sketch -> 'a -> int

count s x returns the estimated frequency of element x in s.

val init_from : 'a sketch -> 'a sketch

init_from s initializes a new empty sketch with the same parameters as s, which can later be merged with s.

val merge : 'a sketch -> 'a sketch -> 'a sketch

merge s1 s2 returns a new sketch whose counts are the sum of those in s1 and s2. Raises INVALID_ARGUMENT if the parameters of s1 and s2 do not match.

\ No newline at end of file diff --git a/owl-base/Owl_heavyhitters_sketch/Make/index.html b/owl-base/Owl_heavyhitters_sketch/Make/index.html deleted file mode 100644 index 6b7a59397..000000000 --- a/owl-base/Owl_heavyhitters_sketch/Make/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Make (owl-base.Owl_heavyhitters_sketch.Make)

Module Owl_heavyhitters_sketch.Make

Parameters

Signature

Type definition
type 'a t

The type of heavy-hitters sketches

Core functions
val init : k:float -> epsilon:float -> delta:float -> 'a t

`init k epsilon delta` initializes a sketch with threshold k, approximation factor epsilon, and failure probability delta.

val add : 'a t -> 'a -> unit

`add h x` adds value `x` to sketch `h` in-place.

val get : 'a t -> ('a * int) list

`get h` returns a list of all heavy-hitters in sketch `h`, as (value, frequency) pairs, sorted in decreasing order of frequency.
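
A minimal usage sketch via the Native implementation listed below; reading k as a frequency threshold of roughly a 1/k fraction of the stream follows the usual heavy-hitters formulation and is an assumption here:

module HH = Owl_heavyhitters_sketch.Native

let () =
  let h = HH.init ~k:100. ~epsilon:0.001 ~delta:0.01 in
  Array.iter (HH.add h) [| "a"; "b"; "a"; "a"; "c" |];
  (* print each heavy hitter with its estimated frequency *)
  List.iter (fun (x, c) -> Printf.printf "%s: %d\n" x c) (HH.get h)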

\ No newline at end of file diff --git a/owl-base/Owl_heavyhitters_sketch/Native/index.html b/owl-base/Owl_heavyhitters_sketch/Native/index.html deleted file mode 100644 index 8098c30c8..000000000 --- a/owl-base/Owl_heavyhitters_sketch/Native/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Native (owl-base.Owl_heavyhitters_sketch.Native)

Module Owl_heavyhitters_sketch.Native

val init : k:float -> epsilon:float -> delta:float -> 'a t
val add : 'a t -> 'a -> unit
val get : 'a t -> ('a * int) list
\ No newline at end of file diff --git a/owl-base/Owl_heavyhitters_sketch/Owl/index.html b/owl-base/Owl_heavyhitters_sketch/Owl/index.html deleted file mode 100644 index a9b40662e..000000000 --- a/owl-base/Owl_heavyhitters_sketch/Owl/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Owl (owl-base.Owl_heavyhitters_sketch.Owl)

Module Owl_heavyhitters_sketch.Owl

val init : k:float -> epsilon:float -> delta:float -> 'a t
val add : 'a t -> 'a -> unit
val get : 'a t -> ('a * int) list
\ No newline at end of file diff --git a/owl-base/Owl_heavyhitters_sketch_sig/.dummy b/owl-base/Owl_heavyhitters_sketch_sig/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_heavyhitters_sketch_sig/module-type-Sig/index.html b/owl-base/Owl_heavyhitters_sketch_sig/module-type-Sig/index.html deleted file mode 100644 index 38748379d..000000000 --- a/owl-base/Owl_heavyhitters_sketch_sig/module-type-Sig/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Sig (owl-base.Owl_heavyhitters_sketch_sig.Sig)

Module type Owl_heavyhitters_sketch_sig.Sig

Type definition
type 'a t

The type of heavy-hitters sketches

Core functions
val init : k:float -> epsilon:float -> delta:float -> 'a t

`init k epsilon delta` initializes a sketch with threshold k, approximation factor epsilon, and failure probability delta.

val add : 'a t -> 'a -> unit

`add h x` adds value `x` to sketch `h` in-place.

val get : 'a t -> ('a * int) list

`get h` returns a list of all heavy-hitters in sketch `h`, as (value, frequency) pairs, sorted in decreasing order of frequency.

\ No newline at end of file diff --git a/owl-base/Owl_io/.dummy b/owl-base/Owl_io/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_lazy/.dummy b/owl-base/Owl_lazy/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_lazy/Make/argument-1-A/Linalg/index.html b/owl-base/Owl_lazy/Make/argument-1-A/Linalg/index.html deleted file mode 100644 index 761491989..000000000 --- a/owl-base/Owl_lazy/Make/argument-1-A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_lazy.Make.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
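
A short, hedged sketch of the kind of call these signatures describe, using Owl's concrete double-precision modules (Owl.Mat and Owl.Linalg.D) as stand-ins for the abstract arr type:

let () =
  let a = Owl.Mat.add (Owl.Mat.uniform 4 4) (Owl.Mat.eye 4) in  (* well-conditioned matrix *)
  let b = Owl.Mat.uniform 4 1 in
  let x = Owl.Linalg.D.linsolve a b in                          (* solve a * x = b *)
  let r = Owl.Mat.sub (Owl.Mat.dot a x) b in                    (* residual, expected near zero *)
  Printf.printf "residual l2 = %g\n" (Owl.Mat.l2norm' r)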
\ No newline at end of file diff --git a/owl-base/Owl_lazy/Make/argument-1-A/Mat/index.html b/owl-base/Owl_lazy/Make/argument-1-A/Mat/index.html deleted file mode 100644 index 9e33addb4..000000000 --- a/owl-base/Owl_lazy/Make/argument-1-A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_lazy.Make.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_lazy/Make/argument-1-A/Scalar/index.html b/owl-base/Owl_lazy/Make/argument-1-A/Scalar/index.html deleted file mode 100644 index 178582cab..000000000 --- a/owl-base/Owl_lazy/Make/argument-1-A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_lazy.Make.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_lazy/Make/argument-1-A/index.html b/owl-base/Owl_lazy/Make/argument-1-A/index.html deleted file mode 100644 index e5ec0b434..000000000 --- a/owl-base/Owl_lazy/Make/argument-1-A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_lazy.Make.A)

Parameter Make.A

include Owl_types_ndarray_mutable.Sig
include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
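
This is the kind of ndarray module the functor expects; Owl's dense ndarray modules are the usual candidates. A small sketch using Owl.Arr (assumed here as the double-precision dense alias) touching a few of the functions listed above:

let () =
  let x = Owl.Arr.sequential [| 2; 3 |] in            (* 0 .. 5 laid out as 2x3 *)
  let y = Owl.Arr.add_scalar x 1. in                  (* element-wise + 1 *)
  Printf.printf "total = %g\n" (Owl.Arr.sum' y);      (* 21 *)
  Owl.Arr.print (Owl.Arr.get_slice [ [ 0 ]; [] ] y)   (* first row only *)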
\ No newline at end of file diff --git a/owl-base/Owl_lazy/Make/index.html b/owl-base/Owl_lazy/Make/index.html deleted file mode 100644 index e8a6c26c2..000000000 --- a/owl-base/Owl_lazy/Make/index.html +++ /dev/null @@ -1,87 +0,0 @@ - -Make (owl-base.Owl_lazy.Make)

Module Owl_lazy.Make

Parameters

Signature

Type definition
type arr

TODO

type elt

TODO

type value

TODO

type attr

TODO

type graph

TODO

Type conversion functions
val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val node_to_arr : attr Owl_graph.node -> arr

TODO

val arr_to_node : arr -> attr Owl_graph.node

TODO

val node_to_elt : attr Owl_graph.node -> elt

TODO

val elt_to_node : elt -> attr Owl_graph.node

TODO

val pack_arr : A.arr -> arr

TODO

val unpack_arr : arr -> A.arr

TODO

val pack_elt : A.elt -> elt

TODO

val unpack_elt : elt -> A.elt

TODO

val float_to_elt : float -> elt

TODO

val elt_to_float : elt -> float

TODO

Utility functions
val graph_to_dot : graph -> string

TODO

val graph_to_trace : graph -> string

TODO

Create variables
val var_arr : ?shape:int array -> string -> arr

TODO

val var_elt : string -> elt

TODO

val const_arr : string -> A.arr -> arr

TODO

val const_elt : string -> A.elt -> elt

TODO

val assign_arr : arr -> A.arr -> unit

TODO

val assign_elt : elt -> A.elt -> unit

TODO

val unsafe_assign_arr : arr -> A.arr -> unit

TODO

Maths functions
val noop : arr -> arr

TODO

val empty : int array -> arr

TODO

val zeros : int array -> arr

TODO

val ones : int array -> arr

TODO

val create : int array -> elt -> arr

TODO

val sequential : ?a:elt -> ?step:elt -> int array -> arr

TODO

val uniform : ?a:elt -> ?b:elt -> int array -> arr

TODO

val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr

TODO

val bernoulli : ?p:elt -> int array -> arr

TODO

val init : int array -> (int -> elt) -> arr

TODO

val shape : arr -> int array

TODO

val numel : arr -> int

TODO

val get : arr -> int array -> elt

TODO

val set : arr -> int array -> elt -> unit

TODO

val get_slice : int list list -> arr -> arr

TODO

val set_slice : int list list -> arr -> arr -> unit

TODO

val copy : arr -> arr

TODO

val reset : arr -> unit

TODO

val reshape : arr -> int array -> arr

TODO

val reverse : arr -> arr

TODO

val tile : arr -> int array -> arr

TODO

val repeat : arr -> int array -> arr

TODO

val concatenate : ?axis:int -> arr array -> arr

TODO

val split : ?axis:int -> int array -> arr -> arr array

TODO

val draw : ?axis:int -> arr -> int -> arr * 'a array

TODO

val map : (elt -> elt) -> arr -> arr

TODO

val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr

TODO

val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr

TODO

val one_hot : int -> arr -> arr

TODO

val lazy_print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(A.elt -> string) -> arr -> arr

TODO

val print : ?max_row:'a -> ?max_col:'b -> ?header:'c -> ?fmt:'d -> 'e -> unit

TODO

val abs : arr -> arr

TODO

val neg : arr -> arr

TODO

val floor : arr -> arr

TODO

val ceil : arr -> arr

TODO

val round : arr -> arr

TODO

val sqr : arr -> arr

TODO

val sqrt : arr -> arr

TODO

val log : arr -> arr

TODO

val log2 : arr -> arr

TODO

val log10 : arr -> arr

TODO

val exp : arr -> arr

TODO

val sin : arr -> arr

TODO

val cos : arr -> arr

TODO

val tan : arr -> arr

TODO

val sinh : arr -> arr

TODO

val cosh : arr -> arr

TODO

val tanh : arr -> arr

TODO

val asin : arr -> arr

TODO

val acos : arr -> arr

TODO

val atan : arr -> arr

TODO

val asinh : arr -> arr

TODO

val acosh : arr -> arr

TODO

val atanh : arr -> arr

TODO

val min : ?axis:int -> ?keep_dims:bool -> arr -> arr

TODO

val max : ?axis:int -> ?keep_dims:bool -> arr -> arr

TODO

val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr

TODO

val sum_reduce : ?axis:int array -> arr -> arr

TODO

val signum : arr -> arr

TODO

val sigmoid : arr -> arr

TODO

val relu : arr -> arr

TODO

val min' : arr -> elt

TODO

val max' : arr -> elt

TODO

val sum' : arr -> elt

TODO

val l1norm' : arr -> elt

TODO

val l2norm' : arr -> elt

TODO

val l2norm_sqr' : arr -> elt

TODO

val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr

TODO

val clip_by_l2norm : elt -> arr -> arr

TODO

val pow : arr -> arr -> arr

TODO

val scalar_pow : elt -> arr -> arr

TODO

val pow_scalar : arr -> elt -> arr

TODO

val atan2 : arr -> arr -> arr

TODO

val scalar_atan2 : elt -> arr -> arr

TODO

val atan2_scalar : arr -> elt -> arr

TODO

val hypot : arr -> arr -> arr

TODO

val min2 : arr -> arr -> arr

TODO

val max2 : arr -> arr -> arr

TODO

val add : arr -> arr -> arr

TODO

val sub : arr -> arr -> arr

TODO

val mul : arr -> arr -> arr

TODO

val div : arr -> arr -> arr

TODO

val add_scalar : arr -> elt -> arr

TODO

val sub_scalar : arr -> elt -> arr

TODO

val mul_scalar : arr -> elt -> arr

TODO

val div_scalar : arr -> elt -> arr

TODO

val scalar_add : elt -> arr -> arr

TODO

val scalar_sub : elt -> arr -> arr

TODO

val scalar_mul : elt -> arr -> arr

TODO

val scalar_div : elt -> arr -> arr

TODO

val fma : arr -> arr -> arr -> arr

TODO

val elt_equal : arr -> arr -> arr

TODO

val elt_not_equal : arr -> arr -> arr

TODO

val elt_less : arr -> arr -> arr

TODO

val elt_greater : arr -> arr -> arr

TODO

val elt_less_equal : arr -> arr -> arr

TODO

val elt_greater_equal : arr -> arr -> arr

TODO

val elt_equal_scalar : arr -> elt -> arr

TODO

val elt_not_equal_scalar : arr -> elt -> arr

TODO

val elt_less_scalar : arr -> elt -> arr

TODO

val elt_greater_scalar : arr -> elt -> arr

TODO

val elt_less_equal_scalar : arr -> elt -> arr

TODO

val elt_greater_equal_scalar : arr -> elt -> arr

TODO

val conv1d : ?padding:Owl_types.padding -> arr -> arr -> int array -> arr

TODO

val conv2d : ?padding:Owl_types.padding -> arr -> arr -> int array -> arr

TODO

val conv3d : ?padding:Owl_types.padding -> arr -> arr -> int array -> arr

TODO

val transpose_conv2d : ?padding:Owl_types.padding -> arr -> arr -> int array -> arr

TODO

val max_pool1d : ?padding:Owl_types.padding -> arr -> int array -> int array -> arr

TODO

val max_pool2d : ?padding:Owl_types.padding -> arr -> int array -> int array -> arr

TODO

val max_pool3d : ?padding:Owl_types.padding -> arr -> int array -> int array -> arr

TODO

val avg_pool1d : ?padding:Owl_types.padding -> arr -> int array -> int array -> arr

TODO

val avg_pool2d : ?padding:Owl_types.padding -> arr -> int array -> int array -> arr

TODO

val avg_pool3d : ?padding:Owl_types.padding -> arr -> int array -> int array -> arr

TODO

val conv1d_backward_input : arr -> arr -> int array -> arr -> arr

TODO

val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr

TODO

val conv2d_backward_input : arr -> arr -> int array -> arr -> arr

TODO

val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr

TODO

val conv3d_backward_input : arr -> arr -> int array -> arr -> arr

TODO

val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr

TODO

val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr

TODO

val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr

TODO

val max_pool1d_backward : Owl_types.padding -> arr -> int array -> int array -> arr -> arr

TODO

val max_pool2d_backward : Owl_types.padding -> arr -> int array -> int array -> arr -> arr

TODO

val max_pool3d_backward : Owl_types.padding -> arr -> int array -> int array -> arr -> arr

TODO

val avg_pool1d_backward : Owl_types.padding -> arr -> int array -> int array -> arr -> arr

TODO

val avg_pool2d_backward : Owl_types.padding -> arr -> int array -> int array -> arr -> arr

TODO

val avg_pool3d_backward : Owl_types.padding -> arr -> int array -> int array -> arr -> arr

TODO

val row_num : arr -> int

TODO

val col_num : arr -> int

TODO

val row : arr -> 'a -> arr

TODO

val rows : arr -> int array -> arr

TODO

val copy_row_to : arr -> 'a -> 'b -> unit

TODO

val copy_col_to : arr -> 'a -> 'b -> unit

TODO

val trace : arr -> elt

TODO

val dot : arr -> arr -> arr

TODO

val transpose : ?axis:int array -> arr -> arr

TODO

val to_rows : arr -> 'a array

TODO

val of_rows : arr array -> arr

TODO

val to_cols : arr -> 'a array

TODO

val of_cols : arr array -> arr

TODO

val of_array : elt array -> int array -> arr

TODO

val of_arrays : elt array array -> arr

TODO

Evaluation functions
val make_graph : input:attr Owl_graph.node array -> output:attr Owl_graph.node array -> string -> graph

TODO

val get_inputs : graph -> attr Owl_graph.node array

TODO

val get_outputs : graph -> attr Owl_graph.node array

TODO

val make_iopair : graph -> attr Owl_graph.node array -> attr Owl_graph.node array -> unit

TODO

val update_iopair : graph -> unit

TODO

val init_inputs : (attr Owl_graph.node -> value) -> graph -> unit

TODO

val optimise : graph -> unit

TODO

val eval_elt : elt array -> unit

TODO

val eval_arr : arr array -> unit

TODO

val eval_graph : graph -> unit

TODO
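
A minimal end-to-end sketch of the lazy workflow this signature describes: declare variables, build an expression, bind concrete arrays, then force evaluation. The functor instantiation with Owl.Dense.Ndarray.S is an assumption; any module matching the A parameter should work:

module L = Owl_lazy.Make (Owl.Dense.Ndarray.S)
module N = Owl.Dense.Ndarray.S

let () =
  let x = L.var_arr ~shape:[| 3; 3 |] "x" in
  let y = L.var_arr ~shape:[| 3; 3 |] "y" in
  let z = L.add (L.sin x) (L.mul x y) in      (* nothing is computed yet *)
  L.assign_arr x (N.uniform [| 3; 3 |]);
  L.assign_arr y (N.ones [| 3; 3 |]);
  L.eval_arr [| z |];                         (* evaluates the whole graph *)
  N.print (L.unpack_arr z)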

\ No newline at end of file diff --git a/owl-base/Owl_log/.dummy b/owl-base/Owl_log/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_maths_interpolate/.dummy b/owl-base/Owl_maths_interpolate/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_maths_quadrature/.dummy b/owl-base/Owl_maths_quadrature/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_maths_root/.dummy b/owl-base/Owl_maths_root/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_neural_compiler/.dummy b/owl-base/Owl_neural_compiler/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Linalg/index.html b/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Linalg/index.html deleted file mode 100644 index 46c580654..000000000 --- a/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Linalg/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Linalg (owl-base.Owl_neural_compiler.Make.Engine.Graph.Optimiser.Operator.Linalg)

Module Operator.Linalg

\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Mat/index.html b/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Mat/index.html deleted file mode 100644 index 1b83df28f..000000000 --- a/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_neural_compiler.Make.Engine.Graph.Optimiser.Operator.Mat)

Module Operator.Mat

\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Scalar/index.html b/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Scalar/index.html deleted file mode 100644 index a9d93754d..000000000 --- a/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Scalar/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -Scalar (owl-base.Owl_neural_compiler.Make.Engine.Graph.Optimiser.Operator.Scalar)

Module Operator.Scalar

\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html b/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html deleted file mode 100644 index a77ade316..000000000 --- a/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_neural_compiler.Make.Engine.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html b/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html deleted file mode 100644 index 8ea142e34..000000000 --- a/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_neural_compiler.Make.Engine.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html b/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html deleted file mode 100644 index ab48aed61..000000000 --- a/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_neural_compiler.Make.Engine.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html b/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html deleted file mode 100644 index 134103e56..000000000 --- a/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_neural_compiler.Make.Engine.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A)

Module Device.A

val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
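The convolution and pooling values above all share one calling convention. Below is a minimal, hedged sketch of that convention, written against Owl.Dense.Ndarray.S from the full owl package on the assumption that it implements this signature's abstract arr type; the input is laid out as [|batch; height; width; channels|], the kernel as [|kh; kw; in_channels; out_channels|], and the trailing int array is the stride:

(* Sketch only: Owl.Dense.Ndarray.S and Owl_types.SAME are assumptions, not part of this page. *)
module N = Owl.Dense.Ndarray.S

let () =
  let x = N.ones [| 1; 28; 28; 3 |] in                       (* one 28x28 RGB image *)
  let k = N.gaussian [| 3; 3; 3; 8 |] in                     (* 3x3 kernel, 3 -> 8 channels *)
  let y = N.conv2d ~padding:Owl_types.SAME x k [| 1; 1 |] in (* stride 1x1 *)
  let z = N.max_pool2d y [| 2; 2 |] [| 2; 2 |] in            (* 2x2 window, 2x2 stride, default padding *)
  Printf.printf "conv2d -> %s; max_pool2d -> %s\n"
    (String.concat "x" (Array.to_list (Array.map string_of_int (N.shape y))))
    (String.concat "x" (Array.to_list (Array.map string_of_int (N.shape z))))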
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
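All of the `_`-suffixed values in this interface mutate an existing buffer instead of returning a fresh array; when the optional ?out is omitted, the result is (as far as this editor understands Owl's convention) written into the first array argument. A hedged sketch, again assuming Owl.Dense.Ndarray.S as a concrete implementation of this signature:

(* Sketch only: performs x <- x - lr * grad in place, allocating just one scratch
   buffer; writing into the first argument when ?out is omitted is an assumption. *)
module N = Owl.Dense.Ndarray.S

let sgd_step_in_place x grad lr =
  let scratch = N.copy grad in
  N.mul_scalar_ scratch lr;   (* scratch <- scratch * lr, in place *)
  N.sub_ x scratch            (* x <- x - scratch, in place *)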
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html b/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html deleted file mode 100644 index 742436c6a..000000000 --- a/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_neural_compiler.Make.Engine.Graph.Optimiser.Operator.Symbol.Shape.Type.Device)

Module Type.Device

module A : sig ... end
val make_device : unit -> device
val arr_to_value : A.arr -> value
val value_to_arr : value -> A.arr
val elt_to_value : A.elt -> value
val value_to_elt : value -> A.elt
val value_to_float : value -> float
val is_arr : value -> bool
val is_elt : value -> bool
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/index.html b/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/index.html deleted file mode 100644 index 15ce71cf9..000000000 --- a/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Symbol/Shape/Type/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Type (owl-base.Owl_neural_compiler.Make.Engine.Graph.Optimiser.Operator.Symbol.Shape.Type)

Module Shape.Type

module Device : sig ... end
and block = E.Graph.Optimiser.Operator.Symbol.Shape.Type.block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}
and attr = E.Graph.Optimiser.Operator.Symbol.Shape.Type.attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}
and op = E.Graph.Optimiser.Operator.Symbol.Shape.Type.op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
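The op variants above enumerate every kind of node the lazy computation graph can contain; the optimiser and the memory allocator dispatch on them. As a purely illustrative, hypothetical sketch (T below is assumed to be an alias for this Type module, and the predicate itself is not part of the library), one might classify the unary element-wise ops that preserve shape and are therefore candidates for buffer reuse:

(* Hypothetical helper, not part of Owl: flags some shape-preserving unary ops. *)
let is_unary_elementwise (op : T.op) =
  match op with
  | T.Abs | T.Neg | T.Sqr | T.Sqrt | T.Exp | T.Log
  | T.Sin | T.Cos | T.Tanh | T.Relu | T.Sigmoid -> true
  | _ -> false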
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Symbol/Shape/index.html b/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Symbol/Shape/index.html deleted file mode 100644 index 714981d7c..000000000 --- a/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Symbol/Shape/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Shape (owl-base.Owl_neural_compiler.Make.Engine.Graph.Optimiser.Operator.Symbol.Shape)

Module Symbol.Shape

module Type : sig ... end
val infer_shape : Type.op -> Type.attr Owl_graph.node array -> int array option array
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Symbol/index.html b/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Symbol/index.html deleted file mode 100644 index b82e88432..000000000 --- a/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/Symbol/index.html +++ /dev/null @@ -1,28 +0,0 @@ - -Symbol (owl-base.Owl_neural_compiler.Make.Engine.Graph.Optimiser.Operator.Symbol)

Module Operator.Symbol

module Shape : sig ... end
val op_to_str : Shape.Type.op -> string
val is_random_variable : Shape.Type.op -> bool
val refnum : 'a Owl_graph.node -> int
val node_shape : Shape.Type.attr Owl_graph.node -> int array
val node_numel : Shape.Type.attr Owl_graph.node -> int
val is_shape_unknown : Shape.Type.attr Owl_graph.node -> bool
val infer_shape_graph : Shape.Type.attr Owl_graph.node array -> unit
val shape_to_str : int array option array -> string
val node_to_str : Shape.Type.attr Owl_graph.node -> string
val node_to_arr : Shape.Type.t -> Shape.Type.arr
val arr_to_node : Shape.Type.arr -> Shape.Type.t
val node_to_elt : Shape.Type.t -> Shape.Type.elt
val elt_to_node : Shape.Type.elt -> Shape.Type.t
val make_node : ?name:string -> ?value:Shape.Type.Device.value array -> ?shape:int array option array -> ?freeze:bool -> ?reuse:bool -> ?state:Shape.Type.state -> Shape.Type.op -> Shape.Type.attr Owl_graph.node
val make_then_connect : ?shape:int array option array -> Shape.Type.op -> Shape.Type.attr Owl_graph.node array -> Shape.Type.attr Owl_graph.node
val var_arr : ?shape:int array -> string -> Shape.Type.arr
val var_elt : string -> Shape.Type.elt
val const_arr : string -> Shape.Type.Device.A.arr -> Shape.Type.arr
val const_elt : string -> Shape.Type.Device.A.elt -> Shape.Type.elt
val new_block_id : unit -> int
val make_empty_block : ?block_id:int -> int -> Shape.Type.block
val make_value_block : Shape.Type.Device.value -> Shape.Type.attr Owl_graph.node -> unit
val add_node_to_block : Shape.Type.attr Owl_graph.node -> Shape.Type.block -> unit
val get_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node option
val set_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node -> unit
val get_block_id : Shape.Type.attr Owl_graph.node -> int
val set_value : Shape.Type.attr Owl_graph.node -> Shape.Type.Device.value array -> unit
val set_operator : Shape.Type.attr Owl_graph.node -> Shape.Type.op -> unit
val set_reuse : Shape.Type.attr Owl_graph.node -> bool -> unit
val get_reuse : Shape.Type.attr Owl_graph.node -> bool
val is_shared : Shape.Type.attr Owl_graph.node -> bool
val get_shared_nodes : Shape.Type.attr Owl_graph.node -> Shape.Type.attr Owl_graph.node array
val is_var : Shape.Type.attr Owl_graph.node -> bool
val is_const : Shape.Type.attr Owl_graph.node -> bool
val is_node_arr : Shape.Type.attr Owl_graph.node -> bool
val is_node_elt : Shape.Type.attr Owl_graph.node -> bool
val is_assigned : Shape.Type.attr Owl_graph.node -> bool
val check_assigned : Shape.Type.attr Owl_graph.node -> unit
val is_valid : Shape.Type.attr Owl_graph.node -> bool
val validate : Shape.Type.attr Owl_graph.node -> unit
val invalidate : Shape.Type.attr Owl_graph.node -> unit
val invalidate_graph : Shape.Type.attr Owl_graph.node -> unit
val is_freeze : Shape.Type.attr Owl_graph.node -> bool
val freeze : Shape.Type.attr Owl_graph.node -> unit
val freeze_descendants : Shape.Type.attr Owl_graph.node array -> unit
val freeze_ancestors : Shape.Type.attr Owl_graph.node array -> unit
val unsafe_assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit
val assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit
val assign_elt : Shape.Type.elt -> Shape.Type.Device.A.elt -> unit
val float_to_elt : float -> Shape.Type.elt
val elt_to_float : Shape.Type.elt -> float
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/index.html b/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/index.html deleted file mode 100644 index 920d9c011..000000000 --- a/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/Operator/index.html +++ /dev/null @@ -1,420 +0,0 @@ - -Operator (owl-base.Owl_neural_compiler.Make.Engine.Graph.Optimiser.Operator)

Module Optimiser.Operator

module Symbol : sig ... end
val empty : int array -> Symbol.Shape.Type.arr
val zeros : int array -> Symbol.Shape.Type.arr
val ones : int array -> Symbol.Shape.Type.arr
val create : int array -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr
val sequential : ?a:Symbol.Shape.Type.elt -> ?step:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val uniform : ?a:Symbol.Shape.Type.elt -> ?b:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val gaussian : ?mu:Symbol.Shape.Type.elt -> ?sigma:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val bernoulli : ?p:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val init : int array -> (int -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr
val init_nd : int array -> (int array -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr
val shape : Symbol.Shape.Type.arr -> int array
val numel : Symbol.Shape.Type.arr -> int
val set : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.elt -> unit
val get_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val set_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit
val set_fancy : Owl_types.index list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit
val copy_ : out:'a -> 'b -> 'c
val reset : Symbol.Shape.Type.arr -> unit
val reshape : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val tile : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val repeat : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val pad : ?v:Symbol.Shape.Type.elt -> int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val expand : ?hi:bool -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr
val squeeze : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val concatenate : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr
val stack : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr
val split : ?axis:int -> 'a -> 'b -> 'c
val draw : ?axis:int -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr * 'a array
val lazy_print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(Symbol.Shape.Type.Device.A.elt -> string) -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val print : ?max_row:'a -> ?max_col:'b -> ?header:'c -> ?fmt:'d -> 'e -> unit
val min : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val sum : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val sum_reduce : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val elt_less_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr
val elt_greater_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr
val conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val transpose_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val transpose_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val transpose_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val dilated_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val dilated_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val dilated_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val max_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val max_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val max_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val avg_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val avg_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val avg_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val upsampling2d : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val avg_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val avg_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val avg_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val upsampling2d_backward : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val row_num : Symbol.Shape.Type.arr -> int
val col_num : Symbol.Shape.Type.arr -> int
val rows : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val copy_row_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit
val copy_col_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit
val transpose : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val to_rows : Symbol.Shape.Type.arr -> 'a array
val to_cols : Symbol.Shape.Type.arr -> 'a array
val of_array : Symbol.Shape.Type.elt array -> int array -> Symbol.Shape.Type.arr
val of_arrays : Symbol.Shape.Type.elt array array -> Symbol.Shape.Type.arr
val to_arrays : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt array array
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/index.html b/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/index.html deleted file mode 100644 index 7f20b7899..000000000 --- a/owl-base/Owl_neural_compiler/Make/Engine/Graph/Optimiser/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Optimiser (owl-base.Owl_neural_compiler.Make.Engine.Graph.Optimiser)

Module Graph.Optimiser

module Operator : sig ... end
val estimate_complexity : 'a Owl_graph.node array -> int * int
val optimise_nodes : Operator.Symbol.Shape.Type.attr Owl_graph.node array -> unit
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Engine/Graph/index.html b/owl-base/Owl_neural_compiler/Make/Engine/Graph/index.html deleted file mode 100644 index eaa02c03f..000000000 --- a/owl-base/Owl_neural_compiler/Make/Engine/Graph/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Graph (owl-base.Owl_neural_compiler.Make.Engine.Graph)

Module Engine.Graph

module Optimiser : sig ... end
type graph = E.Graph.graph
val shape_or_value : Optimiser.Operator.Symbol.Shape.Type.t -> string
val graph_to_dot : graph -> string
val graph_to_trace : graph -> string
val save_graph : 'a -> string -> unit
val load_graph : string -> 'a * 'b
val invalidate_rvs : graph -> unit
val is_iopair_safe : 'a Owl_graph.node -> 'a Owl_graph.node -> bool
val update_iopair : graph -> unit
val remove_unused_iopair : 'a Owl_graph.node array -> 'b array -> 'a Owl_graph.node array * 'b array
val optimise : graph -> unit
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Engine/index.html b/owl-base/Owl_neural_compiler/Make/Engine/index.html deleted file mode 100644 index 1840cd461..000000000 --- a/owl-base/Owl_neural_compiler/Make/Engine/index.html +++ /dev/null @@ -1,485 +0,0 @@ - -Engine (owl-base.Owl_neural_compiler.Make.Engine)

Module Make.Engine

module Graph : sig ... end
val eval_graph : Graph.graph -> unit
module Optimiser = Graph.Optimiser
type graph = E.Graph.graph
val shape_or_value : Optimiser.Operator.Symbol.Shape.Type.t -> string
val graph_to_dot : graph -> string
val graph_to_trace : graph -> string
val save_graph : 'a -> string -> unit
val load_graph : string -> 'a * 'b
val invalidate_rvs : graph -> unit
val is_iopair_safe : 'a Owl_graph.node -> 'a Owl_graph.node -> bool
val update_iopair : graph -> unit
val remove_unused_iopair : 'a Owl_graph.node array -> 'b array -> 'a Owl_graph.node array * 'b array
val optimise : graph -> unit
module Operator = Graph.Optimiser.Operator
val estimate_complexity : 'a Owl_graph.node array -> int * int
val optimise_nodes : Operator.Symbol.Shape.Type.attr Owl_graph.node array -> unit
val empty : int array -> Symbol.Shape.Type.arr
val zeros : int array -> Symbol.Shape.Type.arr
val ones : int array -> Symbol.Shape.Type.arr
val create : int array -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr
val sequential : ?a:Symbol.Shape.Type.elt -> ?step:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val uniform : ?a:Symbol.Shape.Type.elt -> ?b:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val gaussian : ?mu:Symbol.Shape.Type.elt -> ?sigma:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val bernoulli : ?p:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr
val init : int array -> (int -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr
val init_nd : int array -> (int array -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr
val shape : Symbol.Shape.Type.arr -> int array
val numel : Symbol.Shape.Type.arr -> int
val set : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.elt -> unit
val get_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val set_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit
val set_fancy : Owl_types.index list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit
val copy_ : out:'a -> 'b -> 'c
val reset : Symbol.Shape.Type.arr -> unit
val reshape : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val tile : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val repeat : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val pad : ?v:Symbol.Shape.Type.elt -> int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val expand : ?hi:bool -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr
val squeeze : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val concatenate : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr
val stack : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr
val split : ?axis:int -> 'a -> 'b -> 'c
val draw : ?axis:int -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr * 'a array
val lazy_print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(Symbol.Shape.Type.Device.A.elt -> string) -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val print : ?max_row:'a -> ?max_col:'b -> ?header:'c -> ?fmt:'d -> 'e -> unit
val min : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val sum : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val sum_reduce : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val elt_less_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr
val elt_greater_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr
val conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val transpose_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val transpose_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val transpose_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val dilated_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val dilated_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val dilated_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val max_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val max_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val max_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val avg_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val avg_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val avg_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr
val upsampling2d : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val transpose_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val dilated_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val max_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val avg_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val avg_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val avg_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val upsampling2d_backward : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val row_num : Symbol.Shape.Type.arr -> int
val col_num : Symbol.Shape.Type.arr -> int
val rows : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr
val copy_row_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit
val copy_col_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit
val transpose : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr
val to_rows : Symbol.Shape.Type.arr -> 'a array
val to_cols : Symbol.Shape.Type.arr -> 'a array
val of_array : Symbol.Shape.Type.elt array -> int array -> Symbol.Shape.Type.arr
val of_arrays : Symbol.Shape.Type.elt array array -> Symbol.Shape.Type.arr
val to_arrays : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt array array
val op_to_str : Shape.Type.op -> string
val is_random_variable : Shape.Type.op -> bool
val refnum : 'a Owl_graph.node -> int
val node_shape : Shape.Type.attr Owl_graph.node -> int array
val node_numel : Shape.Type.attr Owl_graph.node -> int
val is_shape_unknown : Shape.Type.attr Owl_graph.node -> bool
val infer_shape_graph : Shape.Type.attr Owl_graph.node array -> unit
val shape_to_str : int array option array -> string
val node_to_str : Shape.Type.attr Owl_graph.node -> string
val node_to_arr : Shape.Type.t -> Shape.Type.arr
val arr_to_node : Shape.Type.arr -> Shape.Type.t
val node_to_elt : Shape.Type.t -> Shape.Type.elt
val elt_to_node : Shape.Type.elt -> Shape.Type.t
val make_node : ?name:string -> ?value:Shape.Type.Device.value array -> ?shape:int array option array -> ?freeze:bool -> ?reuse:bool -> ?state:Shape.Type.state -> Shape.Type.op -> Shape.Type.attr Owl_graph.node
val make_then_connect : ?shape:int array option array -> Shape.Type.op -> Shape.Type.attr Owl_graph.node array -> Shape.Type.attr Owl_graph.node
val var_arr : ?shape:int array -> string -> Shape.Type.arr
val var_elt : string -> Shape.Type.elt
val const_arr : string -> Shape.Type.Device.A.arr -> Shape.Type.arr
val const_elt : string -> Shape.Type.Device.A.elt -> Shape.Type.elt
val new_block_id : unit -> int
val make_empty_block : ?block_id:int -> int -> Shape.Type.block
val make_value_block : Shape.Type.Device.value -> Shape.Type.attr Owl_graph.node -> unit
val add_node_to_block : Shape.Type.attr Owl_graph.node -> Shape.Type.block -> unit
val get_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node option
val set_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node -> unit
val get_block_id : Shape.Type.attr Owl_graph.node -> int
val set_value : Shape.Type.attr Owl_graph.node -> Shape.Type.Device.value array -> unit
val set_operator : Shape.Type.attr Owl_graph.node -> Shape.Type.op -> unit
val set_reuse : Shape.Type.attr Owl_graph.node -> bool -> unit
val get_reuse : Shape.Type.attr Owl_graph.node -> bool
val is_shared : Shape.Type.attr Owl_graph.node -> bool
val get_shared_nodes : Shape.Type.attr Owl_graph.node -> Shape.Type.attr Owl_graph.node array
val is_var : Shape.Type.attr Owl_graph.node -> bool
val is_const : Shape.Type.attr Owl_graph.node -> bool
val is_node_arr : Shape.Type.attr Owl_graph.node -> bool
val is_node_elt : Shape.Type.attr Owl_graph.node -> bool
val is_assigned : Shape.Type.attr Owl_graph.node -> bool
val check_assigned : Shape.Type.attr Owl_graph.node -> unit
val is_valid : Shape.Type.attr Owl_graph.node -> bool
val validate : Shape.Type.attr Owl_graph.node -> unit
val invalidate : Shape.Type.attr Owl_graph.node -> unit
val invalidate_graph : Shape.Type.attr Owl_graph.node -> unit
val is_freeze : Shape.Type.attr Owl_graph.node -> bool
val freeze : Shape.Type.attr Owl_graph.node -> unit
val freeze_descendants : Shape.Type.attr Owl_graph.node array -> unit
val freeze_ancestors : Shape.Type.attr Owl_graph.node array -> unit
val unsafe_assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit
val assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit
val assign_elt : Shape.Type.elt -> Shape.Type.Device.A.elt -> unit
val float_to_elt : float -> Shape.Type.elt
val elt_to_float : Shape.Type.elt -> float
val infer_shape : Type.op -> Type.attr Owl_graph.node array -> int array option array
and block = E.Graph.Optimiser.Operator.Symbol.Shape.Type.block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}
and attr = E.Graph.Optimiser.Operator.Symbol.Shape.Type.attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}
and op = E.Graph.Optimiser.Operator.Symbol.Shape.Type.op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
val make_device : unit -> device
val arr_to_value : A.arr -> value
val value_to_arr : value -> A.arr
val elt_to_value : A.elt -> value
val value_to_elt : value -> A.elt
val value_to_float : value -> float
val is_arr : value -> bool
val is_elt : value -> bool
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Activation/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Activation/index.html deleted file mode 100644 index ceeed5247..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Activation/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Activation (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Activation)

Module Neuron.Activation

type typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Activation.typ =
  1. | Elu
  2. | Relu
  3. | Sigmoid
  4. | HardSigmoid
  5. | Softmax of int
  6. | Softplus
  7. | Softsign
  8. | Tanh
  9. | Relu6
  10. | LeakyRelu of float
  11. | TRelu of float
  12. | Custom of (Optimise.Algodiff.t -> Optimise.Algodiff.t)
  13. | None
type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Activation.neuron_typ = {
  1. mutable activation : typ;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val run_activation : Optimise.Algodiff.t -> typ -> Optimise.Algodiff.t
val copy : neuron_typ -> neuron_typ
val activation_to_string : typ -> string
val to_string : neuron_typ -> string
val to_name : unit -> string
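The create/connect/to_string triple above covers an activation neuron's basic life cycle. A minimal sketch, assuming the enclosing functor has been applied and Activation is in scope; the input shape [| 10 |] is illustrative:

let act = Activation.create (Activation.LeakyRelu 0.1)
let () = Activation.connect [| 10 |] act      (* wire to a 10-element input *)
(* the neuron is stateless, so out_shape simply mirrors in_shape *)
let () = print_endline (Activation.to_string act)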
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Add/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Add/index.html deleted file mode 100644 index b290ba30d..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Add/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Add (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Add)

Module Neuron.Add

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Add.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/AlphaDropout/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/AlphaDropout/index.html deleted file mode 100644 index f84573020..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/AlphaDropout/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -AlphaDropout (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.AlphaDropout)

Module Neuron.AlphaDropout

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.AlphaDropout.neuron_typ = {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : float -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Average/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Average/index.html deleted file mode 100644 index b75e5903f..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Average/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Average (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Average)

Module Neuron.Average

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Average.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/AvgPool1D/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/AvgPool1D/index.html deleted file mode 100644 index a1c4f7701..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/AvgPool1D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -AvgPool1D (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.AvgPool1D)

Module Neuron.AvgPool1D

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.AvgPool1D.neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : Owl_types.padding -> int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
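As a quick illustration of the create signature above (padding, then kernel, then stride), here is a hedged sketch; it assumes SAME/VALID are the constructors of Owl_types.padding and uses an illustrative input of 28 steps with 3 channels:

let pool = AvgPool1D.create Owl_types.SAME [| 2 |] [| 2 |]   (* window 2, stride 2 *)
let () = AvgPool1D.connect [| 28; 3 |] pool
let () = print_endline (AvgPool1D.to_string pool)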
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/AvgPool2D/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/AvgPool2D/index.html deleted file mode 100644 index 904eaf03b..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/AvgPool2D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -AvgPool2D (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.AvgPool2D)

Module Neuron.AvgPool2D

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.AvgPool2D.neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : Owl_types.padding -> int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Concatenate/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Concatenate/index.html deleted file mode 100644 index 32aff4f28..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Concatenate/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Concatenate (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Concatenate)

Module Neuron.Concatenate

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Concatenate.neuron_typ = {
  1. mutable axis : int;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : int -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Conv1D/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Conv1D/index.html deleted file mode 100644 index 41e56abac..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Conv1D/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -Conv1D (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Conv1D)

Module Neuron.Conv1D

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Conv1D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Conv2D/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Conv2D/index.html deleted file mode 100644 index d34466fb8..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Conv2D/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -Conv2D (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Conv2D)

Module Neuron.Conv2D

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Conv2D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
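A sketch of the typical call sequence for a convolution neuron, assuming the module is in scope. The kernel layout [| width; height; in_channels; out_channels |] and the kernel-before-stride argument order are assumptions, not confirmed by the signature alone:

let conv =
  Conv2D.create Owl_types.SAME [| 3; 3; 3; 32 |] [| 1; 1 |] Init.GlorotUniform
let () = Conv2D.connect [| 28; 28; 3 |] conv
let () = Conv2D.init conv   (* allocate and initialise w and b *)
let () = Printf.printf "%d parameter tensors\n" (Array.length (Conv2D.mkpar conv))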
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Conv3D/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Conv3D/index.html deleted file mode 100644 index 916606827..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Conv3D/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -Conv3D (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Conv3D)

Module Neuron.Conv3D

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Conv3D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/DilatedConv1D/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/DilatedConv1D/index.html deleted file mode 100644 index a801c3234..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/DilatedConv1D/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -DilatedConv1D (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.DilatedConv1D)

Module Neuron.DilatedConv1D

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.DilatedConv1D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/DilatedConv2D/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/DilatedConv2D/index.html deleted file mode 100644 index 4f2996a30..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/DilatedConv2D/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -DilatedConv2D (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.DilatedConv2D)

Module Neuron.DilatedConv2D

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.DilatedConv2D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/DilatedConv3D/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/DilatedConv3D/index.html deleted file mode 100644 index ef4d28772..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/DilatedConv3D/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -DilatedConv3D (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.DilatedConv3D)

Module Neuron.DilatedConv3D

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.DilatedConv3D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Dot/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Dot/index.html deleted file mode 100644 index 65581c135..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Dot/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Dot (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Dot)

Module Neuron.Dot

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Dot.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Dropout/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Dropout/index.html deleted file mode 100644 index feaed628c..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Dropout/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Dropout (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Dropout)

Module Neuron.Dropout

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Dropout.neuron_typ = {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : float -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Embedding/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Embedding/index.html deleted file mode 100644 index fa2947152..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Embedding/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Embedding (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Embedding)

Module Neuron.Embedding

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Embedding.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable init_typ : Init.typ;
  3. mutable in_dim : int;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : ?inputs:int -> int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Flatten/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Flatten/index.html deleted file mode 100644 index 60a88ec55..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Flatten/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Flatten (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Flatten)

Module Neuron.Flatten

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Flatten.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/FullyConnected/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/FullyConnected/index.html deleted file mode 100644 index e63c05403..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/FullyConnected/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -FullyConnected (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.FullyConnected)

Module Neuron.FullyConnected

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.FullyConnected.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable init_typ : Init.typ;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
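For example, a dense layer can be built directly from the signatures above. This sketch assumes the module is in scope and that ~inputs fixes the input dimension so init can allocate w and b immediately; the sizes are illustrative:

let fc = FullyConnected.create ~inputs:100 10 (Init.Gaussian (0., 0.1))
let () = FullyConnected.init fc
(* mkpar exposes the trainable tensors; update writes a set back *)
let () = FullyConnected.update fc (FullyConnected.mkpar fc)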
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/GRU/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/GRU/index.html deleted file mode 100644 index b93212a9a..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/GRU/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GRU (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.GRU)

Module Neuron.GRU

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.GRU.neuron_typ = {
  1. mutable wxz : Optimise.Algodiff.t;
  2. mutable whz : Optimise.Algodiff.t;
  3. mutable wxr : Optimise.Algodiff.t;
  4. mutable whr : Optimise.Algodiff.t;
  5. mutable wxh : Optimise.Algodiff.t;
  6. mutable whh : Optimise.Algodiff.t;
  7. mutable bz : Optimise.Algodiff.t;
  8. mutable br : Optimise.Algodiff.t;
  9. mutable bh : Optimise.Algodiff.t;
  10. mutable h : Optimise.Algodiff.t;
  11. mutable init_typ : Init.typ;
  12. mutable in_shape : int array;
  13. mutable out_shape : int array;
}
val create : ?time_steps:int -> ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/GaussianDropout/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/GaussianDropout/index.html deleted file mode 100644 index d34b556f4..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/GaussianDropout/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GaussianDropout (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.GaussianDropout)

Module Neuron.GaussianDropout

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.GaussianDropout.neuron_typ = {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : float -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/GaussianNoise/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/GaussianNoise/index.html deleted file mode 100644 index 35576b629..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/GaussianNoise/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GaussianNoise (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.GaussianNoise)

Module Neuron.GaussianNoise

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.GaussianNoise.neuron_typ = {
  1. mutable sigma : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : float -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/GlobalAvgPool1D/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/GlobalAvgPool1D/index.html deleted file mode 100644 index 61903c7f0..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/GlobalAvgPool1D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GlobalAvgPool1D (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.GlobalAvgPool1D)

Module Neuron.GlobalAvgPool1D

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.GlobalAvgPool1D.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/GlobalAvgPool2D/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/GlobalAvgPool2D/index.html deleted file mode 100644 index 928f43c56..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/GlobalAvgPool2D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GlobalAvgPool2D (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.GlobalAvgPool2D)

Module Neuron.GlobalAvgPool2D

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.GlobalAvgPool2D.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/GlobalMaxPool1D/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/GlobalMaxPool1D/index.html deleted file mode 100644 index 8b70ad930..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/GlobalMaxPool1D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GlobalMaxPool1D (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.GlobalMaxPool1D)

Module Neuron.GlobalMaxPool1D

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.GlobalMaxPool1D.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/GlobalMaxPool2D/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/GlobalMaxPool2D/index.html deleted file mode 100644 index 6f41ebc8a..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/GlobalMaxPool2D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GlobalMaxPool2D (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.GlobalMaxPool2D)

Module Neuron.GlobalMaxPool2D

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.GlobalMaxPool2D.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Init/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Init/index.html deleted file mode 100644 index a89f7b6fc..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Init/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Init (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Init)

Module Neuron.Init

type typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Init.typ =
  1. | Uniform of float * float
  2. | Gaussian of float * float
  3. | Standard
  4. | Tanh
  5. | GlorotNormal
  6. | GlorotUniform
  7. | LecunNormal
  8. | HeNormal
  9. | Custom of (int array -> Optimise.Algodiff.t)
val calc_fans : int array -> float * float
val run : typ -> int array -> Optimise.Algodiff.t -> Optimise.Algodiff.t
val to_string : typ -> string
val to_name : unit -> string
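The variants above are plain data, so choosing and inspecting an initialiser is direct; calc_fans is the helper the Glorot/He schemes would rely on. A small sketch, assuming Init is in scope and with the kernel shape chosen only for illustration:

let scheme = Init.Gaussian (0., 0.01)
let () = print_endline (Init.to_string scheme)
(* fan_in / fan_out derived from a 3x3 kernel with 3 input and 32 output channels *)
let fan_in, fan_out = Init.calc_fans [| 3; 3; 3; 32 |]
let () = Printf.printf "fan_in = %g, fan_out = %g\n" fan_in fan_out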
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Input/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Input/index.html deleted file mode 100644 index 16be452f9..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Input/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Input (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Input)

Module Neuron.Input

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Input.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : int array -> neuron_typ
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/LSTM/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/LSTM/index.html deleted file mode 100644 index 5bd3ac21b..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/LSTM/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -LSTM (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.LSTM)

Module Neuron.LSTM

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.LSTM.neuron_typ = {
  1. mutable wxi : Optimise.Algodiff.t;
  2. mutable whi : Optimise.Algodiff.t;
  3. mutable wxc : Optimise.Algodiff.t;
  4. mutable whc : Optimise.Algodiff.t;
  5. mutable wxf : Optimise.Algodiff.t;
  6. mutable whf : Optimise.Algodiff.t;
  7. mutable wxo : Optimise.Algodiff.t;
  8. mutable who : Optimise.Algodiff.t;
  9. mutable bi : Optimise.Algodiff.t;
  10. mutable bc : Optimise.Algodiff.t;
  11. mutable bf : Optimise.Algodiff.t;
  12. mutable bo : Optimise.Algodiff.t;
  13. mutable c : Optimise.Algodiff.t;
  14. mutable h : Optimise.Algodiff.t;
  15. mutable init_typ : Init.typ;
  16. mutable in_shape : int array;
  17. mutable out_shape : int array;
}
val create : ?time_steps:int -> ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
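A sketch of constructing an LSTM cell from the signature above; the roles of the arguments are inferred from the ?time_steps/?inputs labels, and 128 hidden units is an arbitrary choice:

let cell = LSTM.create ~time_steps:7 ~inputs:20 128 Init.Tanh
let () = LSTM.init cell
(* mkpar exposes the gate weights and biases as Algodiff values *)
let () = Printf.printf "%d trainable tensors\n" (Array.length (LSTM.mkpar cell))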
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Lambda/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Lambda/index.html deleted file mode 100644 index e214e6f92..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Lambda/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Lambda (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Lambda)

Module Neuron.Lambda

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Lambda.neuron_typ = {
  1. mutable lambda : Optimise.Algodiff.t -> Optimise.Algodiff.t;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : ?out_shape:int array -> (Optimise.Algodiff.t -> Optimise.Algodiff.t) -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
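Lambda wraps any Algodiff.t -> Algodiff.t function as a layer. A minimal sketch using the identity function, so the default shape inference applies and no ?out_shape override is needed (the shape [| 32 |] is illustrative):

let id_layer = Lambda.create (fun x -> x)
let () = Lambda.connect [| 32 |] id_layer
let () = print_endline (Lambda.to_string id_layer)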
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/LambdaArray/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/LambdaArray/index.html deleted file mode 100644 index e5b183b0b..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/LambdaArray/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -LambdaArray (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.LambdaArray)

Module Neuron.LambdaArray

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.LambdaArray.neuron_typ = {
  1. mutable lambda : Optimise.Algodiff.t array -> Optimise.Algodiff.t;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : int array -> (Optimise.Algodiff.t array -> Optimise.Algodiff.t) -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Linear/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Linear/index.html deleted file mode 100644 index bc5a0f438..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Linear/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Linear (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Linear)

Module Neuron.Linear

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Linear.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable init_typ : Init.typ;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/LinearNoBias/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/LinearNoBias/index.html deleted file mode 100644 index 0b2cbf7df..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/LinearNoBias/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -LinearNoBias (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.LinearNoBias)

Module Neuron.LinearNoBias

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.LinearNoBias.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable init_typ : Init.typ;
  3. mutable in_shape : int array;
  4. mutable out_shape : int array;
}
val create : ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Masking/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Masking/index.html deleted file mode 100644 index 436fd6537..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Masking/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Masking (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Masking)

Module Neuron.Masking

\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Max/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Max/index.html deleted file mode 100644 index a3ea8ad27..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Max/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Max (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Max)

Module Neuron.Max

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Max.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/MaxPool1D/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/MaxPool1D/index.html deleted file mode 100644 index 86566a723..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/MaxPool1D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -MaxPool1D (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.MaxPool1D)

Module Neuron.MaxPool1D

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.MaxPool1D.neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : Owl_types.padding -> int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/MaxPool2D/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/MaxPool2D/index.html deleted file mode 100644 index a3903eba6..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/MaxPool2D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -MaxPool2D (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.MaxPool2D)

Module Neuron.MaxPool2D

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.MaxPool2D.neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : Owl_types.padding -> int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Mul/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Mul/index.html deleted file mode 100644 index fb14ebde0..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Mul/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Mul (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Mul)

Module Neuron.Mul

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Mul.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Normalisation/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Normalisation/index.html deleted file mode 100644 index 271969984..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Normalisation/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -Normalisation (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Normalisation)

Module Neuron.Normalisation

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Normalisation.neuron_typ = {
  1. mutable axis : int;
  2. mutable beta : Optimise.Algodiff.t;
  3. mutable gamma : Optimise.Algodiff.t;
  4. mutable mu : Optimise.Algodiff.t;
  5. mutable var : Optimise.Algodiff.t;
  6. mutable decay : Optimise.Algodiff.t;
  7. mutable training : bool;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}
val create : ?training:bool -> ?decay:float -> ?mu:Optimise.Algodiff.A.arr -> ?var:Optimise.Algodiff.A.arr -> int -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val load_weights : neuron_typ -> Optimise.Algodiff.t array -> unit
val save_weights : neuron_typ -> Optimise.Algodiff.t array
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
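A hedged sketch of batch normalisation over axis 1; the decay value and shapes are illustrative, and the exact contents returned by save_weights (parameters plus running statistics) are an assumption:

let bn = Normalisation.create ~training:true ~decay:0.99 1
let () = Normalisation.connect [| 32; 64 |] bn
let () = Normalisation.init bn
(* save_weights captures what is needed to restore the layer later *)
let () = Printf.printf "%d saved tensors\n" (Array.length (Normalisation.save_weights bn))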
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/A/Linalg/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/A/Linalg/index.html deleted file mode 100644 index 063dc809f..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Algodiff.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/A/Mat/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/A/Mat/index.html deleted file mode 100644 index 43f8a9002..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Algodiff.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/A/Scalar/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/A/Scalar/index.html deleted file mode 100644 index f4267e690..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Algodiff.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
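Since elt is abstract here, the float_to_elt/elt_to_float converters from the parent A module (listed further below) are the natural way in and out. A small sketch, assuming A is in scope:

let three = A.Scalar.add (A.float_to_elt 1.) (A.float_to_elt 2.)
let () = Printf.printf "sigmoid 3 = %g\n" (A.elt_to_float (A.Scalar.sigmoid three))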
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/A/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/A/index.html deleted file mode 100644 index ec17f7289..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/A/index.html +++ /dev/null @@ -1,160 +0,0 @@ - -A (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Algodiff.A)

Module Algodiff.A

val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
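The array API above mirrors Owl's Ndarray. A short sketch exercising creation, elementwise scaling, reduction and slicing, assuming A is in scope:

let x = A.ones [| 3; 4 |]
let y = A.mul_scalar x (A.float_to_elt 2.)
let () = Printf.printf "sum = %g\n" (A.elt_to_float (A.sum' y))
(* first row via the usual Owl slice syntax *)
let row0 = A.get_slice [ [ 0 ]; [] ] y
let () = Printf.printf "row elements = %d\n" (A.numel row0)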
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Arr/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Arr/index.html deleted file mode 100644 index 7981d66ec..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Arr/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Arr (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Algodiff.Arr)

Module Algodiff.Arr

val empty : int array -> t
val zeros : int array -> t
val ones : int array -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int array -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int array -> t
val shape : t -> int array
val numel : t -> int
val reset : t -> unit
val reshape : t -> int array -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Builder/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Builder/index.html deleted file mode 100644 index 8763caa7b..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Builder/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Builder (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Algodiff.Builder)

Module Algodiff.Builder

module type Siso = sig ... end
val build_siso : (module Siso) -> t -> t
module type Sipo = sig ... end
val build_sipo : (module Sipo) -> t -> t * t
module type Sito = sig ... end
val build_sito : (module Sito) -> t -> t * t * t
module type Siao = sig ... end
val build_siao : (module Siao) -> t -> t array
module type Piso = sig ... end
val build_piso : (module Piso) -> t -> t -> t
module type Aiso = sig ... end
val build_aiso : (module Aiso) -> t array -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Aiso/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Aiso/index.html deleted file mode 100644 index 95ee6f144..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Algodiff.Builder.Aiso)

Module type Builder.Aiso

val label : string
val ff : t array -> t
val df : int list -> t -> t array -> t array -> t
val dr : int list -> t array -> t -> t Stdlib.ref -> t list
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Piso/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Piso/index.html deleted file mode 100644 index 62cf018e2..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Algodiff.Builder.Piso)

Module type Builder.Piso

val label : string
val ff_aa : A.elt -> A.elt -> t
val ff_ab : A.elt -> A.arr -> t
val ff_ba : A.arr -> A.elt -> t
val ff_bb : A.arr -> A.arr -> t
val df_da : t -> t -> t -> t -> t
val df_db : t -> t -> t -> t -> t
val df_dab : t -> t -> t -> t -> t -> t
val dr_ab : t -> t -> t -> t Stdlib.ref -> t * t
val dr_a : t -> t -> t -> t Stdlib.ref -> t
val dr_b : t -> t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siao/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siao/index.html deleted file mode 100644 index c110165be..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siao/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siao (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Algodiff.Builder.Siao)

Module type Builder.Siao

val label : string
val ff_f : A.elt -> t array
val ff_arr : A.arr -> t array
val df : t array -> t -> t -> t array
val dr : t -> t -> t Stdlib.ref array -> t Stdlib.ref array -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sipo/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sipo/index.html deleted file mode 100644 index 76eeb31d2..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Algodiff.Builder.Sipo)

Module type Builder.Sipo

val label : string
val ff_f : A.elt -> t * t
val ff_arr : A.arr -> t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siso/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siso/index.html deleted file mode 100644 index 9e0c83ae4..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Algodiff.Builder.Siso)

Module type Builder.Siso

val label : string
val ff_f : A.elt -> t
val ff_arr : A.arr -> t
val df : t -> t -> t -> t
val dr : t -> t -> t Stdlib.ref -> t
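
For orientation, a minimal sketch of a module matching this Siso signature: a hypothetical unary "cube" operation, written only with functions that appear elsewhere in these listings (pack_elt, pack_arr, _f and the Maths operators), assuming the enclosing Algodiff module is open. The argument conventions for df (output primal, input primal, input tangent) and dr (input, output primal, output adjoint) follow the usual Owl Builder pattern and are assumed here, as is the build function that would register such a module.

module Cube_siso = struct
  let label = "cube"

  (* forward pass on a scalar and on an ndarray, lifted into AD values *)
  let ff_f a = Maths.(pow (pack_elt a) (_f 3.))
  let ff_arr a = Maths.(pow (pack_arr a) (_f 3.))

  (* tangent of x^3: 3 * ap^2 * at; cp (the output primal) is unused *)
  let df _cp ap at = Maths.(at * (_f 3. * sqr ap))

  (* adjoint: propagate !ca through the derivative 3 * a^2 *)
  let dr a _cp ca = Maths.(!ca * (_f 3. * sqr a))
end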
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sito/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sito/index.html deleted file mode 100644 index e2caec601..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Algodiff.Builder.Sito)

Module type Builder.Sito

val label : string
val ff_f : A.elt -> t * t * t
val ff_arr : A.arr -> t * t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Linalg/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Linalg/index.html deleted file mode 100644 index e04a9acae..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Algodiff.Linalg)

Module Algodiff.Linalg

val inv : t -> t
val logdet : t -> t
val chol : ?upper:bool -> t -> t
val qr : t -> t * t
val lq : t -> t * t
val svd : ?thin:bool -> t -> t * t * t
val sylvester : t -> t -> t -> t
val lyapunov : t -> t -> t
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> t -> t -> t
val (/@) : t -> t -> t
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> t -> t -> t
val care : ?diag_r:bool -> t -> t -> t -> t -> t
val dare : ?diag_r:bool -> t -> t -> t -> t -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Mat/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Mat/index.html deleted file mode 100644 index de2a96f6c..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Algodiff.Mat)

Module Algodiff.Mat

val empty : int -> int -> t
val zeros : int -> int -> t
val eye : int -> t
val ones : int -> int -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int -> int -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int -> int -> t
val shape : t -> int * int
val numel : t -> int
val row_num : t -> int
val col_num : t -> int
val reset : t -> unit
val reshape : int -> int -> t -> t
val get : t -> int -> int -> t
val set : t -> int -> int -> t -> t
val row : t -> int -> t
val mean : t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
val map_by_row : (t -> t) -> t -> t
val of_arrays : A.elt array array -> t
val init_2d : int -> int -> (int -> int -> t) -> t
val print : t -> unit
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Maths/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Maths/index.html deleted file mode 100644 index a06619a77..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/Maths/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Maths (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Algodiff.Maths)

Module Algodiff.Maths

val (+) : t -> t -> t
val (-) : t -> t -> t
val (*) : t -> t -> t
val (/) : t -> t -> t
val (*@) : t -> t -> t
val (**) : t -> t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val kron : t -> t -> t
val dot : t -> t -> t
val pow : t -> t -> t
val atan2 : t -> t -> t
val min2 : t -> t -> t
val max2 : t -> t -> t
val cross_entropy : t -> t -> t
val inv : t -> t
val neg : t -> t
val abs : t -> t
val signum : t -> t
val floor : t -> t
val ceil : t -> t
val round : t -> t
val sqr : t -> t
val sqrt : t -> t
val log : t -> t
val log2 : t -> t
val log10 : t -> t
val exp : t -> t
val sin : t -> t
val cos : t -> t
val tan : t -> t
val sinh : t -> t
val cosh : t -> t
val tanh : t -> t
val asin : t -> t
val acos : t -> t
val atan : t -> t
val asinh : t -> t
val acosh : t -> t
val atanh : t -> t
val sum' : t -> t
val log_sum_exp' : t -> t
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> t -> t
val sum : ?axis:int -> ?keep_dims:bool -> t -> t
val sum_reduce : ?axis:int array -> t -> t
val mean : t -> t
val transpose : ?axis:int array -> t -> t
val swap : int -> int -> t -> t
val l1norm' : t -> t
val l2norm' : t -> t
val l2norm_sqr' : t -> t
val sigmoid : t -> t
val relu : t -> t
val dawsn : t -> t
val softplus : t -> t
val softsign : t -> t
val softmax : ?axis:int -> t -> t
val reshape : t -> int array -> t
val flatten : t -> t
val get_item : t -> int -> int -> t
val get_row : t -> int -> t
val concat : axis:int -> t -> t -> t
val split : axis:int -> int array -> t -> t array
val of_arrays : t array array -> t
val to_arrays : t -> t array array
val concatenate : axis:int -> t array -> t
val stack : axis:int -> t array -> t
val get_slice : int list list -> t -> t
val set_slice : int list list -> t -> t -> t
val get_fancy : Owl_types.index list -> t -> t
val set_fancy : Owl_types.index list -> t -> t -> t
val diag : ?k:int -> t -> t
val diagm : ?k:int -> t -> t
val trace : t -> t
val triu : ?k:int -> t -> t
val tril : ?k:int -> t -> t
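
Taken together with the Mat module listed above, these operators compose in the usual way. A small sketch (shapes are illustrative, and the enclosing Algodiff module is assumed to be in scope):

(* a single dense layer written with the overloaded Maths operators *)
let x = Mat.uniform 1 3 in          (* 1x3 input *)
let w = Mat.gaussian 3 2 in         (* 3x2 weights *)
let b = Mat.zeros 1 2 in            (* 1x2 bias *)
let y = Maths.(sigmoid ((x *@ w) + b)) in
Mat.print y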
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/NN/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/NN/index.html deleted file mode 100644 index dafc2270d..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/NN/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -NN (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Algodiff.NN)

Module Algodiff.NN

val dropout : ?rate:float -> t -> t
val conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val dilated_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val dilated_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val dilated_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val transpose_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val transpose_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val transpose_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val max_pool1d : Owl_types.padding -> t -> int array -> int array -> t
val max_pool2d : Owl_types.padding -> t -> int array -> int array -> t
val max_pool3d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool1d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool2d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool3d : Owl_types.padding -> t -> int array -> int array -> t
val upsampling2d : t -> int array -> t
val pad : ?v:A.elt -> int list list -> t -> t
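
A hedged sketch of how these wrappers are typically called. The argument order (input, then kernel, then stride) and the NHWC / kernel-shape conventions follow common Owl usage and are assumptions, not something documented on this page; Owl_types.SAME and Owl_types.VALID are the padding constructors. The Algodiff module is assumed to be open.

(* 1x28x28x1 input, 3x3 kernel with 8 output channels, stride 1, then 2x2 pooling *)
let x = Maths.reshape (Mat.uniform 1 784) [| 1; 28; 28; 1 |] in
let k = Maths.reshape (Mat.gaussian 9 8) [| 3; 3; 1; 8 |] in
let y = NN.conv2d ~padding:Owl_types.SAME x k [| 1; 1 |] in
let z = NN.max_pool2d Owl_types.VALID y [| 2; 2 |] [| 2; 2 |] in
ignore z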
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/index.html deleted file mode 100644 index 544f6ae82..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Algodiff/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Algodiff (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Algodiff)

Module Optimise.Algodiff

module A : sig ... end
type t = Owl_neural_generic.Make_Embedded(Engine).Neuron.Optimise.Algodiff.t =
  1. | F of A.elt
  2. | Arr of A.arr
  3. | DF of t * t * int
  4. | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
val tag : unit -> int
val primal : t -> t
val primal' : t -> t
val zero : t -> t
val reset_zero : t -> t
val tangent : t -> t
val adjref : t -> t Stdlib.ref
val adjval : t -> t
val shape : t -> int array
val is_float : t -> bool
val is_arr : t -> bool
val row_num : t -> int
val col_num : t -> int
val numel : t -> int
val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t
val clip_by_l2norm : A.elt -> t -> t
val copy_primal' : t -> t
val tile : t -> int array -> t
val repeat : t -> int array -> t
val pack_elt : A.elt -> t
val unpack_elt : t -> A.elt
val pack_flt : float -> t
val _f : float -> t
val unpack_flt : t -> float
val pack_arr : A.arr -> t
val unpack_arr : t -> A.arr
val deep_info : t -> string
val type_info : t -> string
val error_binop : string -> t -> t -> 'a
val error_uniop : string -> t -> 'a
val make_forward : t -> t -> int -> t
val make_reverse : t -> int -> t
val reverse_prop : t -> t -> unit
val diff : (t -> t) -> t -> t
val diff' : (t -> t) -> t -> t * t
val grad : (t -> t) -> t -> t
val grad' : (t -> t) -> t -> t * t
val jacobian : (t -> t) -> t -> t
val jacobian' : (t -> t) -> t -> t * t
val jacobianv : (t -> t) -> t -> t -> t
val jacobianv' : (t -> t) -> t -> t -> t * t
val jacobianTv : (t -> t) -> t -> t -> t
val jacobianTv' : (t -> t) -> t -> t -> t * t
val hessian : (t -> t) -> t -> t
val hessian' : (t -> t) -> t -> t * t
val hessianv : (t -> t) -> t -> t -> t
val hessianv' : (t -> t) -> t -> t -> t * t
val laplacian : (t -> t) -> t -> t
val laplacian' : (t -> t) -> t -> t * t
val gradhessian : (t -> t) -> t -> t * t
val gradhessian' : (t -> t) -> t -> t * t * t
val gradhessianv : (t -> t) -> t -> t -> t * t
val gradhessianv' : (t -> t) -> t -> t -> t * t * t
module Builder : sig ... end
module Maths : sig ... end
module Linalg : sig ... end
module NN : sig ... end
module Mat : sig ... end
module Arr : sig ... end
val to_trace : t list -> string
val to_dot : t list -> string
val pp_num : Stdlib.Format.formatter -> t -> unit
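
The higher-order functions above (diff, grad, jacobian, hessian and friends) are what most callers use directly. A minimal usage sketch, assuming a concrete instantiation of this Algodiff signature is in scope and using only values from the listings on these pages:

(* derivative of a scalar function at a point *)
let f x = Maths.(sin x + sqr x) in
Printf.printf "f'(1.0) = %g\n" (unpack_flt (diff f (pack_flt 1.0)));

(* gradient of a scalar-valued function of a matrix *)
let g w = Maths.(l2norm_sqr' w) in
Mat.print (grad g (Mat.uniform 3 1))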
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Batch/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Batch/index.html deleted file mode 100644 index 02b165b77..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Batch/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Batch (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Batch)

Module Optimise.Batch

type typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Optimise.Batch.typ =
  1. | Full
  2. | Mini of int
  3. | Sample of int
  4. | Stochastic
val run : typ -> Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t
val batches : typ -> Algodiff.t -> int
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Checkpoint/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Checkpoint/index.html deleted file mode 100644 index cac526494..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Checkpoint/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Checkpoint (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Checkpoint)

Module Optimise.Checkpoint

type state = Owl_neural_generic.Make_Embedded(Engine).Neuron.Optimise.Checkpoint.state = {
  1. mutable current_batch : int;
  2. mutable batches_per_epoch : int;
  3. mutable epochs : float;
  4. mutable batches : int;
  5. mutable loss : Algodiff.t array;
  6. mutable start_at : float;
  7. mutable stop : bool;
  8. mutable gs : Algodiff.t array array;
  9. mutable ps : Algodiff.t array array;
  10. mutable us : Algodiff.t array array;
  11. mutable ch : Algodiff.t array array array;
}
type typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Optimise.Checkpoint.typ =
  1. | Batch of int
  2. | Epoch of float
  3. | Custom of state -> unit
  4. | None
val init_state : int -> float -> state
val default_checkpoint_fun : (string -> 'a) -> 'a
val print_state_info : state -> unit
val print_summary : state -> unit
val run : typ -> (string -> unit) -> int -> Algodiff.t -> state -> unit
val to_string : typ -> string
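
The Custom constructor takes an arbitrary state -> unit callback, which makes it easy to hook custom logging into training. A small sketch using only the fields and helpers listed above (the exact meaning of each field is only hinted at by its name):

(* print a progress summary every 100 batches *)
let ckpt =
  Checkpoint.Custom
    (fun s ->
      if s.Checkpoint.current_batch mod 100 = 0 then Checkpoint.print_summary s)

Such a value is normally passed to Params.config via its ?checkpoint argument (see the Params page below).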
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Clipping/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Clipping/index.html deleted file mode 100644 index d233dbdba..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Clipping/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Clipping (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Clipping)

Module Optimise.Clipping

type typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Optimise.Clipping.typ =
  1. | L2norm of float
  2. | Value of float * float
  3. | None
val run : typ -> Algodiff.t -> Algodiff.t
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Gradient/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Gradient/index.html deleted file mode 100644 index 6d6311668..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Gradient/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -Gradient (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Gradient)

Module Optimise.Gradient

type typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Optimise.Gradient.typ =
  1. | GD
  2. | CG
  3. | CD
  4. | NonlinearCG
  5. | DaiYuanCG
  6. | NewtonCG
  7. | Newton
val run : typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Learning_Rate/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Learning_Rate/index.html deleted file mode 100644 index 0ba40bff5..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Learning_Rate/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Learning_Rate (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Learning_Rate)

Module Optimise.Learning_Rate

type typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Optimise.Learning_Rate.typ =
  1. | Adagrad of float
  2. | Const of float
  3. | Decay of float * float
  4. | Exp_decay of float * float
  5. | RMSprop of float * float
  6. | Adam of float * float * float
  7. | Schedule of float array
val run : typ -> int -> Algodiff.t -> Algodiff.t array -> Algodiff.t
val default : typ -> typ
val update_ch : typ -> Algodiff.t -> Algodiff.t array -> Algodiff.t array
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Loss/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Loss/index.html deleted file mode 100644 index 8656a935b..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Loss/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Loss (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Loss)

Module Optimise.Loss

type typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Optimise.Loss.typ =
  1. | Hinge
  2. | L1norm
  3. | L2norm
  4. | Quadratic
  5. | Cross_entropy
  6. | Custom of Algodiff.t -> Algodiff.t -> Algodiff.t
val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Momentum/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Momentum/index.html deleted file mode 100644 index 037ee5e00..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Momentum/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Momentum (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Momentum)

Module Optimise.Momentum

type typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Optimise.Momentum.typ =
  1. | Standard of float
  2. | Nesterov of float
  3. | None
val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Params/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Params/index.html deleted file mode 100644 index 9f84e5238..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Params/index.html +++ /dev/null @@ -1,15 +0,0 @@ - -Params (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Params)

Module Optimise.Params

type typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Optimise.Params.typ = {
  1. mutable epochs : float;
  2. mutable batch : Batch.typ;
  3. mutable gradient : Gradient.typ;
  4. mutable loss : Loss.typ;
  5. mutable learning_rate : Learning_Rate.typ;
  6. mutable regularisation : Regularisation.typ;
  7. mutable momentum : Momentum.typ;
  8. mutable clipping : Clipping.typ;
  9. mutable stopping : Stopping.typ;
  10. mutable checkpoint : Checkpoint.typ;
  11. mutable verbosity : bool;
}
val default : unit -> typ
val config : ?batch:Batch.typ -> ?gradient:Gradient.typ -> ?loss:Loss.typ -> ?learning_rate:Learning_Rate.typ -> ?regularisation:Regularisation.typ -> ?momentum:Momentum.typ -> ?clipping:Clipping.typ -> ?stopping:Stopping.typ -> ?checkpoint:Checkpoint.typ -> ?verbosity:bool -> float -> typ
val to_string : typ -> string
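
A hedged configuration sketch. All constructors come from the Batch, Learning_Rate, Loss, Momentum and Checkpoint pages above; the one assumption is that the trailing float passed to config is the number of epochs, which matches the usual Owl convention but is not spelled out on this page:

let params =
  Params.config
    ~batch:(Batch.Mini 128)
    ~learning_rate:(Learning_Rate.Adagrad 0.005)
    ~loss:Loss.Cross_entropy
    ~momentum:(Momentum.Standard 0.9)
    ~checkpoint:(Checkpoint.Epoch 1.)
    10.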
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Regularisation/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Regularisation/index.html deleted file mode 100644 index 32b0f5c8c..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Regularisation/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Regularisation (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Regularisation)

Module Optimise.Regularisation

type typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Optimise.Regularisation.typ =
  1. | L1norm of float
  2. | L2norm of float
  3. | Elastic_net of float * float
  4. | None
val run : typ -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Stopping/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Stopping/index.html deleted file mode 100644 index 6fe58fd54..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Stopping/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Stopping (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Stopping)

Module Optimise.Stopping

type typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Optimise.Stopping.typ =
  1. | Const of float
  2. | Early of int * int
  3. | None
val run : typ -> float -> bool
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Utils/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Utils/index.html deleted file mode 100644 index b6840fc9d..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/Utils/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Utils (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise.Utils)

Module Optimise.Utils

val sample_num : Algodiff.t -> int
val draw_samples : Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t
val get_chunk : Algodiff.t -> Algodiff.t -> int -> int -> Algodiff.t * Algodiff.t
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/index.html deleted file mode 100644 index ea2d0e986..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Optimise/index.html +++ /dev/null @@ -1,31 +0,0 @@ - -Optimise (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Optimise)

Module Neuron.Optimise

module Algodiff : sig ... end
module Utils : sig ... end
module Learning_Rate : sig ... end
module Batch : sig ... end
module Loss : sig ... end
module Gradient : sig ... end
module Momentum : sig ... end
module Regularisation : sig ... end
module Clipping : sig ... end
module Stopping : sig ... end
module Checkpoint : sig ... end
module Params : sig ... end
val minimise_weight : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Checkpoint.state * Algodiff.t
val minimise_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t * Algodiff.t array array) -> (Algodiff.t -> Algodiff.t array array * Algodiff.t array array) -> (Algodiff.t array array -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state
val minimise_fun : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Checkpoint.state * Algodiff.t
val minimise_compiled_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> (unit -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state
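
minimise_fun is the simplest of these entry points: it minimises an Algodiff.t -> Algodiff.t objective starting from a given value. A sketch minimising a quadratic, using only functions from the listings on these pages (construction via Algodiff.Mat, arithmetic via Algodiff.Maths):

(* minimise ||w - 3||^2 starting from a random 1x10 row vector *)
let f w = Algodiff.Maths.(l2norm_sqr' (w - Algodiff._f 3.)) in
let w0 = Algodiff.Mat.gaussian 1 10 in
let state, w_min = minimise_fun (Params.default ()) f w0 in
Checkpoint.print_summary state;
Algodiff.Mat.print w_min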
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Padding1D/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Padding1D/index.html deleted file mode 100644 index 700e80164..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Padding1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Padding1D (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Padding1D)

Module Neuron.Padding1D

\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Padding2D/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Padding2D/index.html deleted file mode 100644 index 71979e1f6..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Padding2D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Padding2D (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Padding2D)

Module Neuron.Padding2D

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Padding2D.neuron_typ = {
  1. mutable padding : int array array;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : int array array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Padding3D/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Padding3D/index.html deleted file mode 100644 index 739fd1272..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Padding3D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Padding3D (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Padding3D)

Module Neuron.Padding3D

\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Recurrent/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Recurrent/index.html deleted file mode 100644 index 2fbf07660..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Recurrent/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -Recurrent (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Recurrent)

Module Neuron.Recurrent

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Recurrent.neuron_typ = {
  1. mutable whh : Optimise.Algodiff.t;
  2. mutable wxh : Optimise.Algodiff.t;
  3. mutable why : Optimise.Algodiff.t;
  4. mutable bh : Optimise.Algodiff.t;
  5. mutable by : Optimise.Algodiff.t;
  6. mutable h : Optimise.Algodiff.t;
  7. mutable hiddens : int;
  8. mutable act : Activation.typ;
  9. mutable init_typ : Init.typ;
  10. mutable in_shape : int array;
  11. mutable out_shape : int array;
}
val create : ?time_steps:int -> ?inputs:int -> int -> int -> Activation.typ -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Reshape/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Reshape/index.html deleted file mode 100644 index 01d98d0e6..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Reshape/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Reshape (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Reshape)

Module Neuron.Reshape

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Reshape.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : ?inputs:int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Slice/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Slice/index.html deleted file mode 100644 index 0e70ef820..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/Slice/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Slice (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.Slice)

Module Neuron.Slice

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.Slice.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
  3. mutable slice : int list list;
}
val create : int list list -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/TransposeConv1D/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/TransposeConv1D/index.html deleted file mode 100644 index 5ddbe73aa..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/TransposeConv1D/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -TransposeConv1D (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.TransposeConv1D)

Module Neuron.TransposeConv1D

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.TransposeConv1D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/TransposeConv2D/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/TransposeConv2D/index.html deleted file mode 100644 index 0f7f59e3b..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/TransposeConv2D/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -TransposeConv2D (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.TransposeConv2D)

Module Neuron.TransposeConv2D

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.TransposeConv2D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/TransposeConv3D/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/TransposeConv3D/index.html deleted file mode 100644 index 7fc843cca..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/TransposeConv3D/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -TransposeConv3D (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.TransposeConv3D)

Module Neuron.TransposeConv3D

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.TransposeConv3D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/UpSampling1D/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/UpSampling1D/index.html deleted file mode 100644 index fd6db46e5..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/UpSampling1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -UpSampling1D (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.UpSampling1D)

Module Neuron.UpSampling1D

\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/UpSampling2D/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/UpSampling2D/index.html deleted file mode 100644 index ed88baf3d..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/UpSampling2D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -UpSampling2D (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.UpSampling2D)

Module Neuron.UpSampling2D

type neuron_typ = Owl_neural_generic.Make_Embedded(Engine).Neuron.UpSampling2D.neuron_typ = {
  1. mutable size : int array;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/UpSampling3D/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/UpSampling3D/index.html deleted file mode 100644 index 6e76e2a2b..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/UpSampling3D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -UpSampling3D (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron.UpSampling3D)

Module Neuron.UpSampling3D

\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/index.html deleted file mode 100644 index d7745c285..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/Neuron/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Neuron (owl-base.Owl_neural_compiler.Make.Neural.Graph.Neuron)

Module Graph.Neuron

module Optimise : sig ... end
module Init : sig ... end
module Input : sig ... end
module Activation : sig ... end
module Linear : sig ... end
module LinearNoBias : sig ... end
module Recurrent : sig ... end
module LSTM : sig ... end
module GRU : sig ... end
module Conv1D : sig ... end
module Conv2D : sig ... end
module Conv3D : sig ... end
module DilatedConv1D : sig ... end
module DilatedConv2D : sig ... end
module DilatedConv3D : sig ... end
module TransposeConv1D : sig ... end
module TransposeConv2D : sig ... end
module TransposeConv3D : sig ... end
module FullyConnected : sig ... end
module MaxPool1D : sig ... end
module MaxPool2D : sig ... end
module AvgPool1D : sig ... end
module AvgPool2D : sig ... end
module GlobalMaxPool1D : sig ... end
module GlobalMaxPool2D : sig ... end
module GlobalAvgPool1D : sig ... end
module GlobalAvgPool2D : sig ... end
module UpSampling1D : sig ... end
module UpSampling2D : sig ... end
module UpSampling3D : sig ... end
module Padding1D : sig ... end
module Padding2D : sig ... end
module Padding3D : sig ... end
module Lambda : sig ... end
module LambdaArray : sig ... end
module Dropout : sig ... end
module Reshape : sig ... end
module Flatten : sig ... end
module Slice : sig ... end
module Add : sig ... end
module Mul : sig ... end
module Dot : sig ... end
module Max : sig ... end
module Average : sig ... end
module Concatenate : sig ... end
module Normalisation : sig ... end
module GaussianNoise : sig ... end
module GaussianDropout : sig ... end
module AlphaDropout : sig ... end
module Embedding : sig ... end
module Masking : sig ... end
type neuron = Owl_neural_generic.Make_Embedded(Engine).Neuron.neuron =
  1. | Input of Input.neuron_typ
  2. | Linear of Linear.neuron_typ
  3. | LinearNoBias of LinearNoBias.neuron_typ
  4. | Embedding of Embedding.neuron_typ
  5. | LSTM of LSTM.neuron_typ
  6. | GRU of GRU.neuron_typ
  7. | Recurrent of Recurrent.neuron_typ
  8. | Conv1D of Conv1D.neuron_typ
  9. | Conv2D of Conv2D.neuron_typ
  10. | Conv3D of Conv3D.neuron_typ
  11. | DilatedConv1D of DilatedConv1D.neuron_typ
  12. | DilatedConv2D of DilatedConv2D.neuron_typ
  13. | DilatedConv3D of DilatedConv3D.neuron_typ
  14. | TransposeConv1D of TransposeConv1D.neuron_typ
  15. | TransposeConv2D of TransposeConv2D.neuron_typ
  16. | TransposeConv3D of TransposeConv3D.neuron_typ
  17. | FullyConnected of FullyConnected.neuron_typ
  18. | MaxPool1D of MaxPool1D.neuron_typ
  19. | MaxPool2D of MaxPool2D.neuron_typ
  20. | AvgPool1D of AvgPool1D.neuron_typ
  21. | AvgPool2D of AvgPool2D.neuron_typ
  22. | GlobalMaxPool1D of GlobalMaxPool1D.neuron_typ
  23. | GlobalMaxPool2D of GlobalMaxPool2D.neuron_typ
  24. | GlobalAvgPool1D of GlobalAvgPool1D.neuron_typ
  25. | GlobalAvgPool2D of GlobalAvgPool2D.neuron_typ
  26. | UpSampling2D of UpSampling2D.neuron_typ
  27. | Padding2D of Padding2D.neuron_typ
  28. | Dropout of Dropout.neuron_typ
  29. | Reshape of Reshape.neuron_typ
  30. | Flatten of Flatten.neuron_typ
  31. | Slice of Slice.neuron_typ
  32. | Lambda of Lambda.neuron_typ
  33. | LambdaArray of LambdaArray.neuron_typ
  34. | Activation of Activation.neuron_typ
  35. | GaussianNoise of GaussianNoise.neuron_typ
  36. | GaussianDropout of GaussianDropout.neuron_typ
  37. | AlphaDropout of AlphaDropout.neuron_typ
  38. | Normalisation of Normalisation.neuron_typ
  39. | Add of Add.neuron_typ
  40. | Mul of Mul.neuron_typ
  41. | Dot of Dot.neuron_typ
  42. | Max of Max.neuron_typ
  43. | Average of Average.neuron_typ
  44. | Concatenate of Concatenate.neuron_typ
val get_in_out_shape : neuron -> int array * int array
val get_in_shape : neuron -> int array
val get_out_shape : neuron -> int array
val connect : int array array -> neuron -> unit
val init : neuron -> unit
val reset : neuron -> unit
val mktag : int -> neuron -> unit
val mkpar : neuron -> Optimise.Algodiff.t array
val mkpri : neuron -> Optimise.Algodiff.t array
val mkadj : neuron -> Optimise.Algodiff.t array
val update : neuron -> Optimise.Algodiff.t array -> unit
val load_weights : neuron -> Optimise.Algodiff.t array -> unit
val save_weights : neuron -> Optimise.Algodiff.t array
val copy : neuron -> neuron
val to_string : neuron -> string
val to_name : neuron -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/Graph/index.html b/owl-base/Owl_neural_compiler/Make/Neural/Graph/index.html deleted file mode 100644 index b46cb2c2a..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/Graph/index.html +++ /dev/null @@ -1,243 +0,0 @@ - -Graph (owl-base.Owl_neural_compiler.Make.Neural.Graph)

Module Neural.Graph

module Neuron : sig ... end
type node = Owl_neural_generic.Make_Embedded(Engine).node = {
  1. mutable name : string;
  2. mutable prev : node array;
  3. mutable next : node array;
  4. mutable neuron : Neuron.neuron;
  5. mutable output : Neuron.Optimise.Algodiff.t option;
  6. mutable network : network;
  7. mutable train : bool;
}
and network = Owl_neural_generic.Make_Embedded(Engine).network = {
  1. mutable nnid : string;
  2. mutable size : int;
  3. mutable roots : node array;
  4. mutable outputs : node array;
  5. mutable topo : node array;
}
val make_network : ?nnid:string -> int -> node array -> node array -> network
val make_node : ?name:string -> ?train:bool -> node array -> node array -> Neuron.neuron -> Neuron.Optimise.Algodiff.t option -> network -> node
val get_roots : network -> node array
val get_outputs : network -> node array
val get_node : network -> string -> node
val get_network : ?name:string -> node -> network
val outputs : ?name:string -> node array -> network
val get_network_name : network -> string
val set_network_name : network -> string -> unit
val collect_output : node array -> Neuron.Optimise.Algodiff.t array
val connect_pair : node -> node -> unit
val connect_to_parents : node array -> node -> unit
val add_node : ?act_typ:Neuron.Activation.typ -> network -> node array -> node -> node
val input_shape : network -> int array
val input_shapes : network -> int array array
val init : network -> unit
val reset : network -> unit
val mktag : int -> network -> unit
val mkpar : network -> Neuron.Optimise.Algodiff.t array array
val mkpri : network -> Neuron.Optimise.Algodiff.t array array
val mkadj : network -> Neuron.Optimise.Algodiff.t array array
val update : network -> Neuron.Optimise.Algodiff.t array array -> unit
val run_inputs : Neuron.Optimise.Algodiff.t array -> network -> Neuron.Optimise.Algodiff.t array
val forward_inputs : network -> Neuron.Optimise.Algodiff.t array -> Neuron.Optimise.Algodiff.t array * Neuron.Optimise.Algodiff.t array array
val backward : network -> Neuron.Optimise.Algodiff.t -> Neuron.Optimise.Algodiff.t array array * Neuron.Optimise.Algodiff.t array array
val copy : network -> network
val model_inputs : network -> Neuron.Optimise.Algodiff.A.arr array -> Neuron.Optimise.Algodiff.A.arr array
val input : ?name:string -> int array -> node
val inputs : ?names:string array -> int array array -> node array
val activation : ?name:string -> Neuron.Activation.typ -> node -> node
val linear : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> node -> node
val linear_nobias : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> node -> node
val embedding : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> int -> node -> node
val recurrent : ?name:string -> ?init_typ:Neuron.Init.typ -> act_typ:Neuron.Activation.typ -> int -> int -> node -> node
val lstm : ?name:string -> ?init_typ:Neuron.Init.typ -> int -> node -> node
val gru : ?name:string -> ?init_typ:Neuron.Init.typ -> int -> node -> node
val conv1d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val conv2d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val conv3d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val dilated_conv1d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> int array -> node -> node
val dilated_conv2d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> int array -> node -> node
val dilated_conv3d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> int array -> node -> node
val transpose_conv1d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val transpose_conv2d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val transpose_conv3d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val fully_connected : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> node -> node
val max_pool1d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val max_pool2d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val avg_pool1d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val avg_pool2d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val global_max_pool1d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node
val global_max_pool2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node
val global_avg_pool1d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node
val global_avg_pool2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node
val upsampling2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> int array -> node -> node
val padding2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> int array array -> node -> node
val dropout : ?name:string -> float -> node -> node
val gaussian_noise : ?name:string -> float -> node -> node
val gaussian_dropout : ?name:string -> float -> node -> node
val alpha_dropout : ?name:string -> float -> node -> node
val normalisation : ?name:string -> ?axis:int -> ?training:bool -> ?decay:float -> ?mu:Neuron.Optimise.Algodiff.A.arr -> ?var:Neuron.Optimise.Algodiff.A.arr -> node -> node
val reshape : ?name:string -> int array -> node -> node
val flatten : ?name:string -> node -> node
val slice : ?name:string -> int list list -> node -> node
val lambda : ?name:string -> ?act_typ:Neuron.Activation.typ -> ?out_shape:int array -> (Neuron.Optimise.Algodiff.t -> Neuron.Optimise.Algodiff.t) -> node -> node
val lambda_array : ?name:string -> ?act_typ:Neuron.Activation.typ -> int array -> (Neuron.Optimise.Algodiff.t array -> Neuron.Optimise.Algodiff.t) -> node array -> node
val add : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val mul : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val dot : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val max : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val average : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val concatenate : ?name:string -> ?act_typ:Neuron.Activation.typ -> int -> node array -> node
val to_string : network -> string
val pp_network : Stdlib.Format.formatter -> network -> unit
val print : network -> unit
val save : ?unsafe:bool -> network -> string -> unit
val load : string -> network
val save_weights : network -> string -> unit
val load_weights : network -> string -> unit
val make_subnetwork : ?copy:bool -> ?make_inputs:string array -> network -> string array -> network
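
A sketch of composing a small network with these combinators, assuming the Graph module is open. Everything used below appears in this listing except Activation.Relu, a constructor that is not expanded on these pages and is assumed from the usual Owl Activation.typ:

let nn =
  input [| 784 |]
  |> linear 300 ~act_typ:Neuron.Activation.Relu   (* Relu constructor assumed *)
  |> linear 10
  |> get_network

let () = print nn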
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/Neural/index.html b/owl-base/Owl_neural_compiler/Make/Neural/index.html deleted file mode 100644 index c147cc73b..000000000 --- a/owl-base/Owl_neural_compiler/Make/Neural/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Neural (owl-base.Owl_neural_compiler.Make.Neural)

Module Make.Neural

module Graph : sig ... end
module Optimise = Graph.Neuron.Optimise
module Init = Graph.Neuron.Init
module Activation = Graph.Neuron.Activation
module Regularisation = Graph.Neuron.Optimise.Regularisation
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Linalg/index.html b/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Linalg/index.html deleted file mode 100644 index 6ccb9afc5..000000000 --- a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Linalg/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Linalg (owl-base.Owl_neural_compiler.Make.E.Graph.Optimiser.Operator.Linalg)

Module Operator.Linalg

\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Mat/index.html b/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Mat/index.html deleted file mode 100644 index 516020321..000000000 --- a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_neural_compiler.Make.E.Graph.Optimiser.Operator.Mat)

Module Operator.Mat

val eye : int -> Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Scalar/index.html b/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Scalar/index.html deleted file mode 100644 index 214d00e3c..000000000 --- a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Scalar/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -Scalar (owl-base.Owl_neural_compiler.Make.E.Graph.Optimiser.Operator.Scalar)

Module Operator.Scalar

\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html b/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html deleted file mode 100644 index 13180af94..000000000 --- a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_neural_compiler.Make.E.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
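
These mirror the Algodiff-level Linalg functions but operate on raw arr values. A sketch, assuming the enclosing module A provides the creation and arithmetic functions shown in its own listing further below:

(* solve a x = b for a random right-hand side and sanity-check with dot *)
let a = A.uniform [| 4; 4 |] in
let b = A.uniform [| 4; 1 |] in
let x = A.Linalg.linsolve a b in
A.print (A.sub (A.dot a x) b)   (* residual should be numerically zero *)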
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html b/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html deleted file mode 100644 index 19c5d42c1..000000000 --- a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_neural_compiler.Make.E.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html b/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html deleted file mode 100644 index 0a4002d1e..000000000 --- a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_neural_compiler.Make.E.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html b/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html deleted file mode 100644 index 9a6a1d119..000000000 --- a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_neural_compiler.Make.E.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A)

Module Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html b/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html deleted file mode 100644 index 564076f82..000000000 --- a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_neural_compiler.Make.E.Graph.Optimiser.Operator.Symbol.Shape.Type.Device)

Module Type.Device

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO

\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Symbol/Shape/Type/index.html b/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Symbol/Shape/Type/index.html deleted file mode 100644 index 37155e145..000000000 --- a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Symbol/Shape/Type/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Type (owl-base.Owl_neural_compiler.Make.E.Graph.Optimiser.Operator.Symbol.Shape.Type)

Module Shape.Type

Type definition
type state =
  1. | Valid
  2. | Invalid
    (*

    TODO

    *)

TODO

and block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}

The block type keeps a reference to a block of memory and to the nodes sharing that block.

and attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}

TODO

and arr =
  1. | Arr of t
and elt =
  1. | Elt of t
and op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
    (*

    TODO

    *)
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Symbol/Shape/index.html b/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Symbol/Shape/index.html deleted file mode 100644 index c5e9abcef..000000000 --- a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Symbol/Shape/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Shape (owl-base.Owl_neural_compiler.Make.E.Graph.Optimiser.Operator.Symbol.Shape)

Module Symbol.Shape

Core functions
val infer_shape : Type.op -> Type.attr Owl_graph.node array -> int array option array

TODO

\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Symbol/index.html b/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Symbol/index.html deleted file mode 100644 index 5a7202735..000000000 --- a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/Symbol/index.html +++ /dev/null @@ -1,28 +0,0 @@ - -Symbol (owl-base.Owl_neural_compiler.Make.E.Graph.Optimiser.Operator.Symbol)

Module Operator.Symbol

Core functions
val op_to_str : Shape.Type.op -> string

TODO

val is_random_variable : Shape.Type.op -> bool

TODO

val refnum : 'a Owl_graph.node -> int

TODO

val node_shape : Shape.Type.attr Owl_graph.node -> int array

TODO

val node_numel : Shape.Type.attr Owl_graph.node -> int

TODO

val is_shape_unknown : Shape.Type.attr Owl_graph.node -> bool

TODO

val infer_shape_graph : Shape.Type.attr Owl_graph.node array -> unit

TODO

val shape_to_str : int array option array -> string

TODO

val node_to_str : Shape.Type.attr Owl_graph.node -> string

TODO

val node_to_arr : Shape.Type.t -> Shape.Type.arr

TODO

val arr_to_node : Shape.Type.arr -> Shape.Type.t

TODO

val node_to_elt : Shape.Type.t -> Shape.Type.elt

TODO

val elt_to_node : Shape.Type.elt -> Shape.Type.t

TODO

val make_node : ?name:string -> ?value:Shape.Type.Device.value array -> ?shape:int array option array -> ?freeze:bool -> ?reuse:bool -> ?state:Shape.Type.state -> Shape.Type.op -> Shape.Type.attr Owl_graph.node

TODO

val make_then_connect : ?shape:int array option array -> Shape.Type.op -> Shape.Type.attr Owl_graph.node array -> Shape.Type.attr Owl_graph.node

TODO

val var_arr : ?shape:int array -> string -> Shape.Type.arr

TODO

val var_elt : string -> Shape.Type.elt

TODO

val const_arr : string -> Shape.Type.Device.A.arr -> Shape.Type.arr

TODO

val const_elt : string -> Shape.Type.Device.A.elt -> Shape.Type.elt

TODO

val new_block_id : unit -> int

new_block_id () returns an unused block id.

val make_empty_block : ?block_id:int -> int -> Shape.Type.block

make_empty_block s returns an empty block of memory of size s.

val make_value_block : Shape.Type.Device.value -> Shape.Type.attr Owl_graph.node -> unit

make_value_block value node creates a block of memory initialised with value and links the new block to node.

get_block node returns the memory block allocated to node. If no block is allocated, it raises an exception.

val add_node_to_block : Shape.Type.attr Owl_graph.node -> Shape.Type.block -> unit

Links a node to a reusable block and initialises the node's memory within the memory of that block.

val get_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node option

Return the node that is currently using the memory of the block.

val set_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node -> unit

Update the node that is currently using the block of memory.

val get_block_id : Shape.Type.attr Owl_graph.node -> int

get_block_id node returns the id of the block assigned to node. If node has not been assigned yet, returns -1.
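These block helpers are normally used together: allocate a block, attach nodes to it, then query ownership. The following is a minimal sketch, not taken from the original documentation; it assumes a module Symbol matching this signature is in scope and that n1 and n2 are existing Shape.Type.attr Owl_graph.node values.

let share_one_block n1 n2 size =
  (* allocate one reusable memory block of [size] elements *)
  let blk = Symbol.make_empty_block size in
  (* attach both nodes to the same block *)
  Symbol.add_node_to_block n1 blk;
  Symbol.add_node_to_block n2 blk;
  (* both nodes now report the same block id; -1 would mean "not assigned" *)
  assert (Symbol.get_block_id n1 = Symbol.get_block_id n2);
  (* the block records which node currently owns its memory *)
  Symbol.get_active_node blk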

val set_value : Shape.Type.attr Owl_graph.node -> Shape.Type.Device.value array -> unit

TODO

val set_operator : Shape.Type.attr Owl_graph.node -> Shape.Type.op -> unit

TODO

TODO

val set_reuse : Shape.Type.attr Owl_graph.node -> bool -> unit

TODO

val get_reuse : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_shared : Shape.Type.attr Owl_graph.node -> bool

TODO

val get_shared_nodes : Shape.Type.attr Owl_graph.node -> Shape.Type.attr Owl_graph.node array

get_shared_nodes node returns the nodes sharing the same block of memory as node.

val is_var : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_const : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_arr : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_elt : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_assigned : Shape.Type.attr Owl_graph.node -> bool

is_assigned node checks if a block of memory has been assigned to node.

val check_assigned : Shape.Type.attr Owl_graph.node -> unit

check_assigned node throws an exception if node has not been assigned to a block.
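A small sketch of how the assignment queries combine; the helper is hypothetical and assumes Symbol matches this signature.

let describe_assignment node =
  if Symbol.is_assigned node then (
    (* the node shares its block with every node returned here *)
    let peers = Symbol.get_shared_nodes node in
    Printf.printf "block %d is used by %d node(s)\n"
      (Symbol.get_block_id node) (Array.length peers))
  else print_endline "no memory block assigned to this node yet"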

val is_valid : Shape.Type.attr Owl_graph.node -> bool

TODO

val validate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate_graph : Shape.Type.attr Owl_graph.node -> unit

TODO

val is_freeze : Shape.Type.attr Owl_graph.node -> bool

TODO

val freeze : Shape.Type.attr Owl_graph.node -> unit

TODO

val freeze_descendants : Shape.Type.attr Owl_graph.node array -> unit

TODO

val freeze_ancestors : Shape.Type.attr Owl_graph.node array -> unit

TODO

TODO

TODO

val unsafe_assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_elt : Shape.Type.elt -> Shape.Type.Device.A.elt -> unit

TODO

val float_to_elt : float -> Shape.Type.elt

TODO

val elt_to_float : Shape.Type.elt -> float

TODO

\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/index.html b/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/index.html deleted file mode 100644 index f888ce65e..000000000 --- a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/Operator/index.html +++ /dev/null @@ -1,420 +0,0 @@ - -Operator (owl-base.Owl_neural_compiler.Make.E.Graph.Optimiser.Operator)

Module Optimiser.Operator

Vectorised functions
val empty : int array -> Symbol.Shape.Type.arr

TODO

val zeros : int array -> Symbol.Shape.Type.arr

TODO

val ones : int array -> Symbol.Shape.Type.arr

TODO

val create : int array -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val sequential : ?a:Symbol.Shape.Type.elt -> ?step:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val uniform : ?a:Symbol.Shape.Type.elt -> ?b:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val gaussian : ?mu:Symbol.Shape.Type.elt -> ?sigma:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val bernoulli : ?p:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val init : int array -> (int -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr

TODO

val init_nd : int array -> (int array -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr

TODO

val shape : Symbol.Shape.Type.arr -> int array

TODO

val numel : Symbol.Shape.Type.arr -> int

TODO

TODO

val set : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.elt -> unit

TODO

val get_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val set_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit

TODO

TODO

val set_fancy : Owl_types.index list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit

TODO

val copy_ : out:'a -> 'b -> 'c

TODO

val reset : Symbol.Shape.Type.arr -> unit

TODO

val reshape : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val tile : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val repeat : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val pad : ?v:Symbol.Shape.Type.elt -> int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val expand : ?hi:bool -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr

TODO

val squeeze : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val concatenate : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr

TODO

val stack : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr

TODO

TODO

val split : ?axis:int -> 'a -> 'b -> 'c

TODO

val draw : ?axis:int -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr * 'a array

TODO

TODO

delay f x returns f x. It makes it possible to apply a function that is not tracked by the computation graph and to delay its evaluation. The output must have the same shape as the input.

delay_array out_shape f x works in the same way as delay but is applied to an array of ndarrays. The shape of the output must be passed as an argument.

val lazy_print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(Symbol.Shape.Type.Device.A.elt -> string) -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

lazy_print x prints the output of x when it is evaluated. It is implemented as an identity node. For information about the optional parameters, refer to the print function of the Ndarray module.
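A minimal sketch of delay and lazy_print, not taken from the original documentation; it assumes Operator matches this signature and that delay has the type (Symbol.Shape.Type.Device.A.arr -> Symbol.Shape.Type.Device.A.arr) -> arr -> arr described above.

module A = Operator.Symbol.Shape.Type.Device.A

let negate_untracked x =
  (* A.neg runs outside the computation graph and is evaluated lazily; the shape is preserved *)
  Operator.delay A.neg x

let traced x =
  (* identity node that prints [x] once the graph has been evaluated *)
  Operator.lazy_print ~max_row:10 ~header:true x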

val print : ?max_row:'a -> ?max_col:'b -> ?header:'c -> ?fmt:'d -> 'e -> unit

TODO

val min : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val sum : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val sum_reduce : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val log_sum_exp : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

TODO

val elt_less_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val elt_greater_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val upsampling2d : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val upsampling2d_backward : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val row_num : Symbol.Shape.Type.arr -> int

TODO

val col_num : Symbol.Shape.Type.arr -> int

TODO

val rows : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val copy_row_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

val copy_col_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

TODO

val transpose : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val to_rows : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val to_cols : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val of_array : Symbol.Shape.Type.elt array -> int array -> Symbol.Shape.Type.arr

TODO

val of_arrays : Symbol.Shape.Type.elt array array -> Symbol.Shape.Type.arr

TODO

val to_arrays : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt array array

TODO

Scalar functions
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/index.html b/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/index.html deleted file mode 100644 index 5bdb98931..000000000 --- a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/Optimiser/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Optimiser (owl-base.Owl_neural_compiler.Make.E.Graph.Optimiser)

Module Graph.Optimiser

Core functions
val estimate_complexity : 'a Owl_graph.node array -> int * int

TODO

val optimise_nodes : Operator.Symbol.Shape.Type.attr Owl_graph.node array -> unit

TODO

\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/index.html b/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/index.html deleted file mode 100644 index dc162779f..000000000 --- a/owl-base/Owl_neural_compiler/Make/argument-1-E/Graph/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Graph (owl-base.Owl_neural_compiler.Make.E.Graph)

Module E.Graph

Type definition
type graph

TODO

Core functions
val shape_or_value : Optimiser.Operator.Symbol.Shape.Type.t -> string

TODO

val graph_to_dot : graph -> string

TODO

val graph_to_trace : graph -> string

TODO

val save_graph : 'a -> string -> unit

TODO

val load_graph : string -> 'a * 'b

TODO

val invalidate_rvs : graph -> unit

TODO

val is_iopair_safe : 'a Owl_graph.node -> 'a Owl_graph.node -> bool

TODO

val update_iopair : graph -> unit

TODO

val remove_unused_iopair : 'a Owl_graph.node array -> 'b array -> 'a Owl_graph.node array * 'b array

TODO

val optimise : graph -> unit

TODO

\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/argument-1-E/index.html b/owl-base/Owl_neural_compiler/Make/argument-1-E/index.html deleted file mode 100644 index 1e96082ba..000000000 --- a/owl-base/Owl_neural_compiler/Make/argument-1-E/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -E (owl-base.Owl_neural_compiler.Make.E)

Parameter Make.E

Core evaluation functions of the engine

TODO

TODO

val eval_graph : Graph.graph -> unit

TODO

\ No newline at end of file diff --git a/owl-base/Owl_neural_compiler/Make/index.html b/owl-base/Owl_neural_compiler/Make/index.html deleted file mode 100644 index ac643e930..000000000 --- a/owl-base/Owl_neural_compiler/Make/index.html +++ /dev/null @@ -1,49 +0,0 @@ - -Make (owl-base.Owl_neural_compiler.Make)

Module Owl_neural_compiler.Make

Parameters

Signature

module Engine : sig ... end
module Neural : sig ... end

Naive compilation functions; the loss function must be passed in explicitly.

Shallow compilation functions; these include only the gradient.

Deep compilation functions; these include gs, us, ps, ch, and the new weights. A usage sketch of model is given after the signatures below.

val make_eval_fun : 'a -> Neural.Algodiff.t -> Neural.Algodiff.t -> Engine.Graph.graph -> Neural.Algodiff.t -> Neural.Algodiff.t -> 'b
val make_update_fun : Engine.graph -> unit -> unit
val model_inputs : ?optimise:bool -> ?batch_size:int -> Neural.Graph.network -> Neural.Algodiff.t array -> Neural.Algodiff.t array
val model : ?optimise:bool -> ?batch_size:int -> Neural.Graph.network -> Neural.Algodiff.t -> Neural.Algodiff.t
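A minimal usage sketch, not taken from the original documentation; it assumes M is an instantiation of Owl_neural_compiler.Make over a concrete engine and that network is an existing Neural.Graph.network.

let predict (network : M.Neural.Graph.network) x =
  (* compile the network into a static computation graph and evaluate it on [x] *)
  M.model ~batch_size:32 network x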
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/.dummy b/owl-base/Owl_neural_generic/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Activation/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Activation/index.html deleted file mode 100644 index ab9fea0a5..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Activation/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Activation (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Activation)

Module Neuron.Activation

type typ =
  1. | Elu
  2. | Relu
  3. | Sigmoid
  4. | HardSigmoid
  5. | Softmax of int
  6. | Softplus
  7. | Softsign
  8. | Tanh
  9. | Relu6
  10. | LeakyRelu of float
  11. | TRelu of float
  12. | Custom of (Optimise.Algodiff.t -> Optimise.Algodiff.t)
  13. | None
    (*

    Types of activation functions.

    *)
type neuron_typ = {
  1. mutable activation : typ;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val run_activation : Optimise.Algodiff.t -> typ -> Optimise.Algodiff.t

Run one specific activation function.
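A minimal usage sketch, not taken from the original documentation; it assumes the Activation module and its Optimise.Algodiff module are in scope, and the input shape [|32|] is arbitrary.

let apply_relu (x : Optimise.Algodiff.t) =
  Activation.run_activation x Activation.Relu

let leaky_neuron =
  (* build an activation neuron from a [typ] value, then connect it by input shape *)
  let n = Activation.create (Activation.LeakyRelu 0.1) in
  Activation.connect [| 32 |] n;
  n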

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val activation_to_string : typ -> string

Return the name of a specific activation function.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Add/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Add/index.html deleted file mode 100644 index 70a01cc83..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Add/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Add (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Add)

Module Neuron.Add

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/AlphaDropout/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/AlphaDropout/index.html deleted file mode 100644 index 6e89124ac..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/AlphaDropout/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -AlphaDropout (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.AlphaDropout)

Module Neuron.AlphaDropout

type neuron_typ = {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : float -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Average/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Average/index.html deleted file mode 100644 index b6fd4d746..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Average/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Average (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Average)

Module Neuron.Average

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/AvgPool1D/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/AvgPool1D/index.html deleted file mode 100644 index 360cd3f81..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/AvgPool1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -AvgPool1D (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.AvgPool1D)

Module Neuron.AvgPool1D

type neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : Owl_types.padding -> int array -> int array -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/AvgPool2D/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/AvgPool2D/index.html deleted file mode 100644 index f90afa5b2..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/AvgPool2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -AvgPool2D (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.AvgPool2D)

Module Neuron.AvgPool2D

type neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : Owl_types.padding -> int array -> int array -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Concatenate/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Concatenate/index.html deleted file mode 100644 index bf919fef0..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Concatenate/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Concatenate (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Concatenate)

Module Neuron.Concatenate

type neuron_typ = {
  1. mutable axis : int;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : int -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Conv1D/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Conv1D/index.html deleted file mode 100644 index a9ecc4172..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Conv1D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -Conv1D (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Conv1D)

Module Neuron.Conv1D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.
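A minimal sketch, not taken from the original documentation; it assumes Conv1D, Init and Owl_types are in scope, that the kernel array follows the [|kernel_width; in_channels; out_channels|] convention of the underlying conv1d primitive with the stride given as [|stride|], that ~inputs is the input shape [|steps; channels|], and that Init.Standard is one of the Init.typ initialisers.

let conv =
  (* 16 filters of width 3 over a single-channel signal, SAME padding, stride 1 *)
  Conv1D.create ~inputs:[| 128; 1 |] Owl_types.SAME [| 3; 1; 16 |] [| 1 |] Init.Standard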

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Conv2D/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Conv2D/index.html deleted file mode 100644 index de04f6d0f..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Conv2D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -Conv2D (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Conv2D)

Module Neuron.Conv2D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Conv3D/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Conv3D/index.html deleted file mode 100644 index 8d0ff7abe..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Conv3D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -Conv3D (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Conv3D)

Module Neuron.Conv3D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/DilatedConv1D/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/DilatedConv1D/index.html deleted file mode 100644 index c8b87621a..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/DilatedConv1D/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -DilatedConv1D (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.DilatedConv1D)

Module Neuron.DilatedConv1D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/DilatedConv2D/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/DilatedConv2D/index.html deleted file mode 100644 index 9e2c209a8..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/DilatedConv2D/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -DilatedConv2D (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.DilatedConv2D)

Module Neuron.DilatedConv2D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/DilatedConv3D/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/DilatedConv3D/index.html deleted file mode 100644 index affcc5aff..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/DilatedConv3D/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -DilatedConv3D (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.DilatedConv3D)

Module Neuron.DilatedConv3D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Dot/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Dot/index.html deleted file mode 100644 index 7caeed222..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Dot/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Dot (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Dot)

Module Neuron.Dot

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Dropout/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Dropout/index.html deleted file mode 100644 index b96d9a6d8..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Dropout/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Dropout (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Dropout)

Module Neuron.Dropout

type neuron_typ = {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : float -> neuron_typ

Create the neuron.
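
A short sketch based on the signatures on this page; the module is assumed to be in scope through a concrete instantiation of this functor stack.

  let drop = Dropout.create 0.5            (* drop activations with probability 0.5 *)
  let () = Dropout.connect [|128|] drop    (* dropout preserves the incoming shape *)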

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Embedding/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Embedding/index.html deleted file mode 100644 index 834a24cdc..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Embedding/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Embedding (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Embedding)

Module Neuron.Embedding

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable init_typ : Init.typ;
  3. mutable in_dim : int;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int -> int -> int -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Flatten/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Flatten/index.html deleted file mode 100644 index 32f68b386..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Flatten/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Flatten (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Flatten)

Module Neuron.Flatten

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/FullyConnected/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/FullyConnected/index.html deleted file mode 100644 index 2ff2abdf4..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/FullyConnected/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -FullyConnected (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.FullyConnected)

Module Neuron.FullyConnected

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable init_typ : Init.typ;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int -> int -> Init.typ -> neuron_typ

Create the neuron.
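
For example, a 784-to-10 dense layer can be assembled from the signatures above (module assumed in scope):

  let fc = FullyConnected.create ~inputs:784 10 Init.Standard
  let () = FullyConnected.connect [|784|] fc   (* fixes in_shape; out_shape becomes [|10|] *)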

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/GRU/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/GRU/index.html deleted file mode 100644 index ada5f46d4..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/GRU/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GRU (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.GRU)

Module Neuron.GRU

type neuron_typ = {
  1. mutable wxz : Optimise.Algodiff.t;
  2. mutable whz : Optimise.Algodiff.t;
  3. mutable wxr : Optimise.Algodiff.t;
  4. mutable whr : Optimise.Algodiff.t;
  5. mutable wxh : Optimise.Algodiff.t;
  6. mutable whh : Optimise.Algodiff.t;
  7. mutable bz : Optimise.Algodiff.t;
  8. mutable br : Optimise.Algodiff.t;
  9. mutable bh : Optimise.Algodiff.t;
  10. mutable h : Optimise.Algodiff.t;
  11. mutable init_typ : Init.typ;
  12. mutable in_shape : int array;
  13. mutable out_shape : int array;
}

Neuron type definition.

val create : ?time_steps:int -> ?inputs:int -> int -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/GaussianDropout/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/GaussianDropout/index.html deleted file mode 100644 index cbb75de06..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/GaussianDropout/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GaussianDropout (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.GaussianDropout)

Module Neuron.GaussianDropout

type neuron_typ = {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : float -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/GaussianNoise/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/GaussianNoise/index.html deleted file mode 100644 index f5c99b1cd..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/GaussianNoise/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GaussianNoise (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.GaussianNoise)

Module Neuron.GaussianNoise

type neuron_typ = {
  1. mutable sigma : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : float -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/GlobalAvgPool1D/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/GlobalAvgPool1D/index.html deleted file mode 100644 index 915c14215..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/GlobalAvgPool1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GlobalAvgPool1D (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.GlobalAvgPool1D)

Module Neuron.GlobalAvgPool1D

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/GlobalAvgPool2D/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/GlobalAvgPool2D/index.html deleted file mode 100644 index e266b5573..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/GlobalAvgPool2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GlobalAvgPool2D (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.GlobalAvgPool2D)

Module Neuron.GlobalAvgPool2D

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/GlobalMaxPool1D/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/GlobalMaxPool1D/index.html deleted file mode 100644 index 83b189ccc..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/GlobalMaxPool1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GlobalMaxPool1D (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.GlobalMaxPool1D)

Module Neuron.GlobalMaxPool1D

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/GlobalMaxPool2D/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/GlobalMaxPool2D/index.html deleted file mode 100644 index 0491c6e71..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/GlobalMaxPool2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GlobalMaxPool2D (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.GlobalMaxPool2D)

Module Neuron.GlobalMaxPool2D

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Init/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Init/index.html deleted file mode 100644 index 673ad6758..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Init/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Init (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Init)

Module Neuron.Init

type typ =
  1. | Uniform of float * float
  2. | Gaussian of float * float
  3. | Standard
  4. | Tanh
  5. | GlorotNormal
  6. | GlorotUniform
  7. | LecunNormal
  8. | HeNormal
  9. | Custom of (int array -> Optimise.Algodiff.t)

Initialisation types.

val calc_fans : int array -> float * float

Calculate fan-in and fan-out of weights.
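
A small sketch of how these values are typically used, assuming the module is in scope as Init. The Glorot-uniform bound sqrt(6 / (fan_in + fan_out)) is the standard formula and an assumption about this implementation, not something stated on this page.

  let fan_in, fan_out = Init.calc_fans [|3; 3; 3; 32|]
  let glorot_limit = sqrt (6. /. (fan_in +. fan_out))
  let summary = Init.to_string Init.GlorotUniform   (* human-readable name of the scheme *)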

val run : typ -> int array -> Optimise.Algodiff.t -> Optimise.Algodiff.t

Execute the computation in this neuron.

val to_string : typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Input/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Input/index.html deleted file mode 100644 index ee73cae97..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Input/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Input (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Input)

Module Neuron.Input

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : int array -> neuron_typ

Create the neuron.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/LSTM/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/LSTM/index.html deleted file mode 100644 index c4a148723..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/LSTM/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -LSTM (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.LSTM)

Module Neuron.LSTM

type neuron_typ = {
  1. mutable wxi : Optimise.Algodiff.t;
  2. mutable whi : Optimise.Algodiff.t;
  3. mutable wxc : Optimise.Algodiff.t;
  4. mutable whc : Optimise.Algodiff.t;
  5. mutable wxf : Optimise.Algodiff.t;
  6. mutable whf : Optimise.Algodiff.t;
  7. mutable wxo : Optimise.Algodiff.t;
  8. mutable who : Optimise.Algodiff.t;
  9. mutable bi : Optimise.Algodiff.t;
  10. mutable bc : Optimise.Algodiff.t;
  11. mutable bf : Optimise.Algodiff.t;
  12. mutable bo : Optimise.Algodiff.t;
  13. mutable c : Optimise.Algodiff.t;
  14. mutable h : Optimise.Algodiff.t;
  15. mutable init_typ : Init.typ;
  16. mutable in_shape : int array;
  17. mutable out_shape : int array;
}

Neuron type definition.

val create : ?time_steps:int -> ?inputs:int -> int -> Init.typ -> neuron_typ

Create the neuron.
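
A sketch using the create signature above: 128 hidden units unrolled over 28 time steps of 10-dimensional inputs. Reading the final int argument as the number of hidden units is an assumption drawn from the record fields, not stated explicitly here.

  let lstm = LSTM.create ~time_steps:28 ~inputs:10 128 Init.Tanh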

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Lambda/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Lambda/index.html deleted file mode 100644 index d4d6da9cc..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Lambda/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Lambda (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Lambda)

Module Neuron.Lambda

type neuron_typ = {
  1. mutable lambda : Optimise.Algodiff.t -> Optimise.Algodiff.t;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : ?out_shape:int array -> (Optimise.Algodiff.t -> Optimise.Algodiff.t) -> neuron_typ

Create the neuron.
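
A sketch of a stateless custom layer. Algodiff's Maths submodule (providing sqr on Algodiff values) is assumed here but not listed on this page; ?out_shape is omitted, so the default is used.

  let square = Lambda.create (fun x -> Optimise.Algodiff.Maths.sqr x)   (* element-wise square *)
  let () = Lambda.connect [|64|] square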

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/LambdaArray/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/LambdaArray/index.html deleted file mode 100644 index 692db12cf..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/LambdaArray/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -LambdaArray (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.LambdaArray)

Module Neuron.LambdaArray

type neuron_typ = {
  1. mutable lambda : Optimise.Algodiff.t array -> Optimise.Algodiff.t;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : int array -> (Optimise.Algodiff.t array -> Optimise.Algodiff.t) -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Linear/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Linear/index.html deleted file mode 100644 index dd343b720..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Linear/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Linear (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Linear)

Module Neuron.Linear

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable init_typ : Init.typ;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int -> int -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/LinearNoBias/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/LinearNoBias/index.html deleted file mode 100644 index c39eeec49..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/LinearNoBias/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -LinearNoBias (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.LinearNoBias)

Module Neuron.LinearNoBias

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable init_typ : Init.typ;
  3. mutable in_shape : int array;
  4. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int -> int -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Masking/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Masking/index.html deleted file mode 100644 index b795e816d..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Masking/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Masking (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Masking)

Module Neuron.Masking

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Max/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Max/index.html deleted file mode 100644 index 88d2404ac..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Max/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Max (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Max)

Module Neuron.Max

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/MaxPool1D/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/MaxPool1D/index.html deleted file mode 100644 index 1d09ef18b..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/MaxPool1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -MaxPool1D (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.MaxPool1D)

Module Neuron.MaxPool1D

type neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : Owl_types.padding -> int array -> int array -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/MaxPool2D/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/MaxPool2D/index.html deleted file mode 100644 index 1ba461429..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/MaxPool2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -MaxPool2D (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.MaxPool2D)

Module Neuron.MaxPool2D

type neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : Owl_types.padding -> int array -> int array -> neuron_typ

Create the neuron.
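
For instance, the usual 2x2 pooling with stride 2, built from the signature above (SAME/VALID are the padding constructors assumed from Owl_types):

  let pool = MaxPool2D.create Owl_types.VALID [|2; 2|] [|2; 2|]
  let () = MaxPool2D.connect [|28; 28; 32|] pool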

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Mul/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Mul/index.html deleted file mode 100644 index c223ac0e6..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Mul/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mul (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Mul)

Module Neuron.Mul

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Normalisation/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Normalisation/index.html deleted file mode 100644 index bf2be7454..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Normalisation/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -Normalisation (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Normalisation)

Module Neuron.Normalisation

type neuron_typ = {
  1. mutable axis : int;
  2. mutable beta : Optimise.Algodiff.t;
  3. mutable gamma : Optimise.Algodiff.t;
  4. mutable mu : Optimise.Algodiff.t;
  5. mutable var : Optimise.Algodiff.t;
  6. mutable decay : Optimise.Algodiff.t;
  7. mutable training : bool;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}

Neuron type definition.

val create : ?training:bool -> ?decay:float -> ?mu:Optimise.Algodiff.A.arr -> ?var:Optimise.Algodiff.A.arr -> int -> neuron_typ

Create the neuron. Note that axis 0 is the batch axis.
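
A sketch of a batch-normalisation neuron over the feature axis; decay 0.9 is an arbitrary illustrative value.

  let bn = Normalisation.create ~training:true ~decay:0.9 1   (* axis 1, since axis 0 is the batch axis *)
  let () = Normalisation.connect [|10|] bn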

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the trainable parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update trainable parameters of the neuron, used by Optimise module.

val load_weights : neuron_typ -> Optimise.Algodiff.t array -> unit

Load both trainable and non-trainable parameters into the neuron.

val save_weights : neuron_typ -> Optimise.Algodiff.t array

Assemble both trainable and non-trainable parameters of the neuron.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/A/Linalg/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/A/Linalg/index.html deleted file mode 100644 index 04059e77a..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Algodiff.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
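
A minimal sketch that builds a 2x2 system and solves it with the functions above; init_nd and float_to_elt come from the enclosing A module, and a * x = b is assumed to be the linsolve convention.

  let a = A.init_nd [|2; 2|] (fun i -> A.float_to_elt (if i.(0) = i.(1) then 4. else 1.))
  let b = A.init_nd [|2; 1|] (fun _ -> A.float_to_elt 1.)
  let x = A.Linalg.linsolve a b   (* solution of a * x = b *)
  let d = A.Linalg.logdet a       (* log-determinant of a *)
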
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/A/Mat/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/A/Mat/index.html deleted file mode 100644 index 09ada62d9..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Algodiff.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
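
For example (with the enclosing A module in scope):

  let id3 = Mat.eye 3             (* 3x3 identity matrix *)
  let lower = Mat.tril ~k:0 id3   (* keep the lower triangle, here unchanged *)
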
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/A/Scalar/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/A/Scalar/index.html deleted file mode 100644 index e323bb243..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Algodiff.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/A/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/A/index.html deleted file mode 100644 index 08e4fd141..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Algodiff.A)

Module Algodiff.A

include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Arr/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Arr/index.html deleted file mode 100644 index 09f517e0c..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Arr/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Arr (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Algodiff.Arr)

Module Algodiff.Arr

val empty : int array -> t
val zeros : int array -> t
val ones : int array -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int array -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int array -> t
val shape : t -> int array
val numel : t -> int
val reset : t -> unit
val reshape : t -> int array -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
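
A short sketch: Arr builds Algodiff values directly from shapes, so they can be fed to the differentiable operators without manual wrapping.

  let x = Arr.uniform [|3; 4|]
  let y = Arr.ones [|4; 2|]
  let z = Arr.dot x y        (* shape [|3; 2|] *)
  let dims = Arr.shape z
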
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Builder/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Builder/index.html deleted file mode 100644 index ec58cbc9b..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Builder/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Builder (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Algodiff.Builder)

Module Algodiff.Builder

Ops Builder
module type Siso = sig ... end
val build_siso : (module Siso) -> t -> t

Build single-input, single-output operations.

module type Sipo = sig ... end
val build_sipo : (module Sipo) -> t -> t * t

Build single-input, pair-output operations.

module type Sito = sig ... end
val build_sito : (module Sito) -> t -> t * t * t

Build single-input, triple-output operations.

module type Siao = sig ... end
val build_siao : (module Siao) -> t -> t array

Build single-input, array-output operations.

module type Piso = sig ... end
val build_piso : (module Piso) -> t -> t -> t

Build pair-input, single-output operations.

module type Aiso = sig ... end
val build_aiso : (module Aiso) -> t array -> t

Build array-input, single-output operations.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Builder/module-type-Aiso/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Builder/module-type-Aiso/index.html deleted file mode 100644 index 08da696bc..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Builder/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Algodiff.Builder.Aiso)

Module type Builder.Aiso

val label : string
val ff : t array -> t
val df : int list -> t -> t array -> t array -> t
val dr : int list -> t array -> t -> t Stdlib.ref -> t list
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Builder/module-type-Piso/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Builder/module-type-Piso/index.html deleted file mode 100644 index fc7d59157..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Builder/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Algodiff.Builder.Piso)

Module type Builder.Piso

val label : string
val ff_aa : A.elt -> A.elt -> t
val ff_ab : A.elt -> A.arr -> t
val ff_ba : A.arr -> A.elt -> t
val ff_bb : A.arr -> A.arr -> t
val df_da : t -> t -> t -> t -> t
val df_db : t -> t -> t -> t -> t
val df_dab : t -> t -> t -> t -> t -> t
val dr_ab : t -> t -> t -> t Stdlib.ref -> t * t
val dr_a : t -> t -> t -> t Stdlib.ref -> t
val dr_b : t -> t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siao/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siao/index.html deleted file mode 100644 index bac71b555..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siao/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siao (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Algodiff.Builder.Siao)

Module type Builder.Siao

val label : string
val ff_f : A.elt -> t array
val ff_arr : A.arr -> t array
val df : t array -> t -> t -> t array
val dr : t -> t -> t Stdlib.ref array -> t Stdlib.ref array -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sipo/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sipo/index.html deleted file mode 100644 index 481bc159a..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Algodiff.Builder.Sipo)

Module type Builder.Sipo

val label : string
val ff_f : A.elt -> t * t
val ff_arr : A.arr -> t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siso/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siso/index.html deleted file mode 100644 index 3c91cb3aa..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Algodiff.Builder.Siso)

Module type Builder.Siso

val label : string
val ff_f : A.elt -> t
val ff_arr : A.arr -> t
val df : t -> t -> t -> t
val dr : t -> t -> t Stdlib.ref -> t
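The signature above is enough to sketch how a user-defined unary operation can be wired into Algodiff. The OCaml fragment below is a minimal sketch, not code from Owl itself: it assumes the double-precision instance Algodiff.D from the full owl package (aliased as AD), and the meaning of the df and dr arguments (output primal, input primal, input tangent; input, output primal, output adjoint reference) is inferred from how the built-in unary operators are typically written, so treat that ordering as an assumption.

module AD = Owl.Algodiff.D                  (* assumption: full owl package installed *)

(* cube x = x^3 as a single-input single-output (siso) op *)
module Cube = struct
  open AD
  let label = "cube"
  let ff_f a = Maths.(let x = pack_elt a in x * x * x)    (* forward rule on scalars *)
  let ff_arr a = Maths.(let x = pack_arr a in x * x * x)  (* forward rule on ndarrays *)
  let df _cp ap at = Maths.(_f 3. * ap * ap * at)         (* tangent rule: 3 x^2 dx *)
  let dr a _cp ca = Maths.(!ca * _f 3. * a * a)           (* adjoint rule: ca * 3 x^2 *)
end

let cube x = AD.Builder.build_siso (module Cube : AD.Builder.Siso) x

(* d/dx x^3 at x = 2 should be 12 *)
let () = Printf.printf "%g\n" AD.(unpack_flt (diff cube (pack_flt 2.)))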
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sito/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sito/index.html deleted file mode 100644 index bc59dc088..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Algodiff.Builder.Sito)

Module type Builder.Sito

val label : string
val ff_f : A.elt -> t * t * t
val ff_arr : A.arr -> t * t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Linalg/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Linalg/index.html deleted file mode 100644 index f38bf1dd2..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Algodiff.Linalg)

Module Algodiff.Linalg

val inv : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val logdet : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val chol : ?upper:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val qr : t -> t * t

Refer to :doc:`owl_dense_ndarray_generic`

val lq : t -> t * t

Refer to :doc:`owl_dense_ndarray_generic`

val svd : ?thin:bool -> t -> t * t * t

Refer to :doc:`owl_dense_ndarray_generic`

val sylvester : t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val lyapunov : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (/@) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val care : ?diag_r:bool -> t -> t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dare : ?diag_r:bool -> t -> t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`
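Because these functions operate on Algodiff's t values, gradients propagate through the linear-algebra routines as well. A minimal sketch, assuming the concrete instance Algodiff.D from the full owl package (aliased as AD): for a symmetric positive-definite X the gradient of log det X is transpose (inv X), so the printed difference should be numerically zero.

module AD = Owl.Algodiff.D                  (* assumption: full owl package installed *)

let () =
  let a = AD.Mat.gaussian 5 5 in
  (* build a symmetric positive-definite matrix so logdet is well defined *)
  let x = AD.Maths.(a *@ transpose a + AD.Mat.eye 5) in
  let g = AD.grad (fun m -> AD.Linalg.logdet m) x in
  AD.Mat.print AD.Maths.(g - transpose (AD.Linalg.inv x))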

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Mat/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Mat/index.html deleted file mode 100644 index 738df55ea..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Algodiff.Mat)

Module Algodiff.Mat

val empty : int -> int -> t
val zeros : int -> int -> t
val eye : int -> t
val ones : int -> int -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int -> int -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int -> int -> t
val shape : t -> int * int
val numel : t -> int
val row_num : t -> int
val col_num : t -> int
val reset : t -> unit
val reshape : int -> int -> t -> t
val get : t -> int -> int -> t
val set : t -> int -> int -> t -> t
val row : t -> int -> t
val mean : t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
val map_by_row : (t -> t) -> t -> t
val of_arrays : A.elt array array -> t
val init_2d : int -> int -> (int -> int -> t) -> t
val print : t -> unit
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Maths/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Maths/index.html deleted file mode 100644 index cd33726a8..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/Maths/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Maths (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Algodiff.Maths)

Module Algodiff.Maths

val (+) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (-) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (*) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (/) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (*@) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (**) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val add : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sub : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val mul : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val div : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val kron : t -> t -> t

Refer to :doc:`owl_dense_matrix_generic`

val dot : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val pow : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atan2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val min2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cross_entropy : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val inv : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val neg : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val abs : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val signum : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val floor : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val ceil : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val round : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sqr : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sqrt : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log2 : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log10 : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val exp : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sin : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cos : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tan : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sinh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cosh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tanh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val asin : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val acos : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atan : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val asinh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val acosh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atanh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp : ?axis:int -> ?keep_dims:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum : ?axis:int -> ?keep_dims:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum_reduce : ?axis:int array -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val mean : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose : ?axis:int array -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val swap : int -> int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l1norm' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm_sqr' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sigmoid : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val relu : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dawsn : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softplus : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softsign : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softmax : ?axis:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val reshape : t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val flatten : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_item : t -> int -> int -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_row : t -> int -> t

Refer to :doc:`owl_dense_ndarray_generic`

val concat : axis:int -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val split : axis:int -> int array -> t -> t array

Refer to :doc:`owl_dense_ndarray_generic`

val of_arrays : t array array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val to_arrays : t -> t array array

Refer to :doc:`owl_dense_ndarray_generic`

val concatenate : axis:int -> t array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val stack : axis:int -> t array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_slice : int list list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val set_slice : int list list -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_fancy : Owl_types.index list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val set_fancy : Owl_types.index list -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val diag : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val diagm : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val trace : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val triu : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tril : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`
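All of these functions take and return Algodiff's t values, so they can be mixed freely inside any function that is later differentiated. A small sketch of the operator style, assuming the concrete instance Algodiff.D from the full owl package (aliased as AD):

module AD = Owl.Algodiff.D                  (* assumption: full owl package installed *)

(* a one-output logistic unit written with the Maths operators *)
let logistic w x b = AD.Maths.(sigmoid ((w *@ x) + b))

let () =
  let w = AD.Mat.gaussian 1 3
  and x = AD.Mat.uniform 3 1
  and b = AD.Mat.zeros 1 1 in
  AD.Mat.print (logistic w x b)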

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/NN/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/NN/index.html deleted file mode 100644 index 7ebb4e767..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/NN/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -NN (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Algodiff.NN)

Module Algodiff.NN

val dropout : ?rate:float -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool1d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool2d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool3d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool1d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool2d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool3d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val upsampling2d : t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val pad : ?v:A.elt -> int list list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`
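These are the differentiable primitives used by the neuron modules later in this diff. A small shape-checking sketch, assuming the concrete instance Algodiff.D from the full owl package (aliased as AD) and the usual layout of [|batch; width; height; channels|] for inputs and [|kw; kh; in_channels; out_channels|] for kernels (both assumptions):

module AD = Owl.Algodiff.D                  (* assumption: full owl package installed *)

let () =
  let x = AD.Arr.uniform [| 1; 28; 28; 1 |] in    (* one 28x28 single-channel image *)
  let k = AD.Arr.gaussian [| 3; 3; 1; 8 |] in     (* 3x3 kernel, 1 -> 8 channels *)
  let y = AD.NN.conv2d ~padding:Owl_types.SAME x k [| 1; 1 |] in
  let z = AD.NN.max_pool2d Owl_types.VALID y [| 2; 2 |] [| 2; 2 |] in
  (* expected output shape: [|1; 14; 14; 8|] *)
  Array.iter (Printf.printf "%d ") (AD.shape z)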

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/index.html deleted file mode 100644 index bc26ea79e..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Algodiff/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -Algodiff (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Algodiff)

Module Optimise.Algodiff

include Owl_algodiff_core_sig.Sig
Type definition
include Owl_algodiff_types_sig.Sig with type elt := A.elt and type arr := A.arr
type t =
  1. | F of A.elt
  2. | Arr of A.arr
  3. | DF of t * t * int
  4. | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
Core functions
val tag : unit -> int

TODO

val primal : t -> t

TODO

val primal' : t -> t

TODO

val zero : t -> t

TODO

val reset_zero : t -> t

TODO

val tangent : t -> t

TODO

val adjref : t -> t Stdlib.ref

TODO

val adjval : t -> t

TODO

val shape : t -> int array

TODO

val is_float : t -> bool

TODO

val is_arr : t -> bool

TODO

val row_num : t -> int

number of rows

val col_num : t -> int

number of columns

val numel : t -> int

number of elements

val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t

other functions, without tracking gradient

val clip_by_l2norm : A.elt -> t -> t

other functions, without tracking gradient

val copy_primal' : t -> t

TODO

val tile : t -> int array -> t

TODO

val repeat : t -> int array -> t

TODO

val pack_elt : A.elt -> t

convert from elt type to t type.

val unpack_elt : t -> A.elt

convert from t type to elt type.

val pack_flt : float -> t

convert from float type to t type.

val _f : float -> t

A shortcut function for F A.(float_to_elt x).

val unpack_flt : t -> float

convert from t type to float type.

val pack_arr : A.arr -> t

convert from arr type to t type.

val unpack_arr : t -> A.arr

convert from t type to arr type.

val deep_info : t -> string

TODO

val type_info : t -> string

TODO

val error_binop : string -> t -> t -> 'a

TODO

val error_uniop : string -> t -> 'a

TODO

val make_forward : t -> t -> int -> t

TODO

val make_reverse : t -> int -> t

TODO

val reverse_prop : t -> t -> unit

TODO

val diff : (t -> t) -> t -> t

diff f x returns the exact derivative of a function f : scalar -> scalar at point x. Simply calling diff f will return its derivative function g of the same type, i.e. g : scalar -> scalar.

Calling this function repeatedly gives you higher-order derivatives of f, i.e. f |> diff |> diff |> diff |> ...

val diff' : (t -> t) -> t -> t * t

similar to diff, but return (f x, diff f x).
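A minimal sketch of diff and repeated differentiation, assuming the double-precision instance Algodiff.D from the full owl package (aliased as AD); the Algodiff module documented here exposes the same interface:

module AD = Owl.Algodiff.D                  (* assumption: full owl package installed *)

let f x = AD.Maths.(sin x * x)

let () =
  let f'  = AD.diff f in
  let f'' = AD.(diff (diff f)) in
  Printf.printf "f'(1) = %g, f''(1) = %g\n"
    AD.(unpack_flt (f'  (pack_flt 1.)))
    AD.(unpack_flt (f'' (pack_flt 1.)))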

val grad : (t -> t) -> t -> t

gradient of f : (vector -> scalar) at x, reverse ad.

val grad' : (t -> t) -> t -> t * t

similar to grad, but return (f x, grad f x).
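A sketch of grad on a vector-to-scalar function (same AD alias assumption as above): the gradient of the sum of squares at a row vector x is 2 * x, so the printed difference should be all zeros.

module AD = Owl.Algodiff.D                  (* assumption: full owl package installed *)

let () =
  let x = AD.Mat.uniform 1 5 in                            (* a row vector *)
  let g = AD.grad (fun v -> AD.Maths.(sum' (v * v))) x in
  AD.Mat.print AD.Maths.(g - (AD.pack_flt 2. * x))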

val jacobian : (t -> t) -> t -> t

jacobian of f : (vector -> vector) at x, where both x and y are row vectors.

val jacobian' : (t -> t) -> t -> t * t

similar to jacobian, but return (f x, jacobian f x)

val jacobianv : (t -> t) -> t -> t -> t

jacobian vector product of f : (vector -> vector) at x along v, forward ad. Namely, it calculates (jacobian f x) v.

val jacobianv' : (t -> t) -> t -> t -> t * t

similar to jacobianv, but return (f x, jacobianv f x v)

val jacobianTv : (t -> t) -> t -> t -> t

transposed jacobian vector product of f : (vector -> vector) at x along v, backward ad. Namely, it calculates (transpose (jacobian f x)) v.

val jacobianTv' : (t -> t) -> t -> t -> t * t

similar to jacobianTv, but return (f x, jacobianTv f x v)

val hessian : (t -> t) -> t -> t

hessian of f : (vector -> scalar) at x.

val hessian' : (t -> t) -> t -> t * t

similar to hessian, but return (f x, hessian f x)
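A sketch for hessian (same AD alias assumption): for the quadratic form f(x) = x A transpose(x) with x a row vector, the Hessian is A + transpose(A), so the printed difference should be all zeros.

module AD = Owl.Algodiff.D                  (* assumption: full owl package installed *)

let () =
  let a = AD.Mat.gaussian 3 3 in
  let f x = AD.Maths.(sum' (x *@ a *@ transpose x)) in     (* x is a 1x3 row vector *)
  let h = AD.hessian f (AD.Mat.uniform 1 3) in
  AD.Mat.print AD.Maths.(h - (a + transpose a))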

val hessianv : (t -> t) -> t -> t -> t

hessian vector product of f : (vector -> scalar) at x along v. Namely, it calculates (hessian f x) v.

val hessianv' : (t -> t) -> t -> t -> t * t

similar to hessianv, but return (f x, hessianv f x v).

val laplacian : (t -> t) -> t -> t

laplacian of f : (vector -> scalar) at x.

val laplacian' : (t -> t) -> t -> t * t

similar to laplacian, but return (f x, laplacian f x).

val gradhessian : (t -> t) -> t -> t * t

return (grad f x, hessian f x), f : (vector -> scalar)

val gradhessian' : (t -> t) -> t -> t * t * t

return (f x, grad f x, hessian f x)

val gradhessianv : (t -> t) -> t -> t -> t * t

return (grad f x v, hessian f x v)

val gradhessianv' : (t -> t) -> t -> t -> t * t * t

return (f x, grad f x v, hessian f x v)

include Owl_algodiff_ops_sig.Sig with type t := t and type elt := A.elt and type arr := A.arr and type op := op
module Builder : Owl_algodiff_ops_builder_sig.Sig with type t := t and type elt := A.elt and type arr := A.arr and type op := op
Supported Maths functions
module Maths : sig ... end
Supported Linalg functions
module Linalg : sig ... end
Supported Neural Network functions
module NN : sig ... end
Supported Mat functions
module Mat : sig ... end
Supported Arr functions
module Arr : sig ... end
Helper functions
include Owl_algodiff_graph_convert_sig.Sig with type t := t
val to_trace : t list -> string

to_trace [t0; t1; ...] outputs the trace of the computation graph on the terminal in a human-readable format.

val to_dot : t list -> string

to_dot [t0; t1; ...] outputs the trace of the computation graph in the dot file format, which you can pass to other tools such as Graphviz for further visualisation.
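A sketch of dumping a computation graph for visualisation with Graphviz (same AD alias assumption as above); make_reverse and tag from this page are used so that the traced graph actually contains DR nodes:

module AD = Owl.Algodiff.D                  (* assumption: full owl package installed *)

let () =
  let t = AD.tag () in
  let x = AD.make_reverse (AD.Mat.uniform 3 1) t in
  let w = AD.Mat.gaussian 2 3
  and b = AD.Mat.zeros 2 1 in
  let y = AD.Maths.(tanh ((w *@ x) + b)) in
  let oc = open_out "cgraph.dot" in
  output_string oc (AD.to_dot [ y ]);
  close_out oc
  (* render with e.g.:  dot -Tpdf cgraph.dot -o cgraph.pdf *)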

val pp_num : Stdlib.Format.formatter -> t -> unit

pp_num t pretty prints the abstract number used in Algodiff.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Batch/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Batch/index.html deleted file mode 100644 index 68990a947..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Batch/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Batch (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Batch)

Module Optimise.Batch

Batch module

type typ =
  1. | Full
  2. | Mini of int
  3. | Sample of int
  4. | Stochastic

Types of batches.

val run : typ -> Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t

Execute the computations defined in module typ.

val batches : typ -> Algodiff.t -> int

Return the total number of batches given a batch typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Checkpoint/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Checkpoint/index.html deleted file mode 100644 index 302f82bac..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Checkpoint/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Checkpoint (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Checkpoint)

Module Optimise.Checkpoint

Checkpoint module

type state = {
  1. mutable current_batch : int;
  2. mutable batches_per_epoch : int;
  3. mutable epochs : float;
  4. mutable batches : int;
  5. mutable loss : Algodiff.t array;
  6. mutable start_at : float;
  7. mutable stop : bool;
  8. mutable gs : Algodiff.t array array;
  9. mutable ps : Algodiff.t array array;
  10. mutable us : Algodiff.t array array;
  11. mutable ch : Algodiff.t array array array;
}

Type definition of checkpoint

type typ =
  1. | Batch of int
  2. | Epoch of float
  3. | Custom of (state -> unit)
  4. | None

Checkpoint type.

val init_state : int -> float -> state

init_state batches_per_epoch epochs initialises a state by specifying the number of batches per epoch and the number of epochs in total.

val default_checkpoint_fun : (string -> 'a) -> 'a

This function is used for saving intermediate files during optimisation.

val print_state_info : state -> unit

Print out the detailed information of the current state.

val print_summary : state -> unit

Print out the summary of the current state.

val run : typ -> (string -> unit) -> int -> Algodiff.t -> state -> unit

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Clipping/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Clipping/index.html deleted file mode 100644 index 49be118ec..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Clipping/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Clipping (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Clipping)

Module Optimise.Clipping

Clipping module

type typ =
  1. | L2norm of float
  2. | Value of float * float
  3. | None

Types of clipping functions.

val run : typ -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Gradient/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Gradient/index.html deleted file mode 100644 index d92e66f50..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Gradient/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -Gradient (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Gradient)

Module Optimise.Gradient

Gradient module

type typ =
  1. | GD
  2. | CG
  3. | CD
  4. | NonlinearCG
  5. | DaiYuanCG
  6. | NewtonCG
  7. | Newton

Types of gradient function.

val run : typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Learning_Rate/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Learning_Rate/index.html deleted file mode 100644 index d0caa230b..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Learning_Rate/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Learning_Rate (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Learning_Rate)

Module Optimise.Learning_Rate

Strategies for learning rate update

type typ =
  1. | Adagrad of float
  2. | Const of float
  3. | Decay of float * float
  4. | Exp_decay of float * float
  5. | RMSprop of float * float
  6. | Adam of float * float * float
  7. | Schedule of float array

Representation of learning rate update strategies. Possible values include:

  • Adam (alpha, beta1, beta2), see ref for parameter meaning
val run : typ -> int -> Algodiff.t -> Algodiff.t array -> Algodiff.t

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val update_ch : typ -> Algodiff.t -> Algodiff.t array -> Algodiff.t array

Update the cache of gradients.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Loss/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Loss/index.html deleted file mode 100644 index 13f7140b5..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Loss/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Loss (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Loss)

Module Optimise.Loss

Loss module

type typ =
  1. | Hinge
  2. | L1norm
  3. | L2norm
  4. | Quadratic
  5. | Cross_entropy
  6. | Custom of (Algodiff.t -> Algodiff.t -> Algodiff.t)

Types of loss functions.

val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Momentum/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Momentum/index.html deleted file mode 100644 index d85c188c2..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Momentum/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Momentum (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Momentum)

Module Optimise.Momentum

Momentum module

type typ =
  1. | Standard of float
  2. | Nesterov of float
  3. | None

Types of momentum functions.

val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Params/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Params/index.html deleted file mode 100644 index 9d5539656..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Params/index.html +++ /dev/null @@ -1,14 +0,0 @@ - -Params (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Params)

Module Optimise.Params

Params module

type typ = {
  1. mutable epochs : float;
  2. mutable batch : Batch.typ;
  3. mutable gradient : Gradient.typ;
  4. mutable loss : Loss.typ;
  5. mutable learning_rate : Learning_Rate.typ;
  6. mutable regularisation : Regularisation.typ;
  7. mutable momentum : Momentum.typ;
  8. mutable clipping : Clipping.typ;
  9. mutable stopping : Stopping.typ;
  10. mutable checkpoint : Checkpoint.typ;
  11. mutable verbosity : bool;
}

Type definition of parameter.

val default : unit -> typ

Create module typ with default values.

val config : ?batch:Batch.typ -> ?gradient:Gradient.typ -> ?loss:Loss.typ -> ?learning_rate:Learning_Rate.typ -> ?regularisation:Regularisation.typ -> ?momentum:Momentum.typ -> ?clipping:Clipping.typ -> ?stopping:Stopping.typ -> ?checkpoint:Checkpoint.typ -> ?verbosity:bool -> float -> typ

This function creates a parameter object with many configurations.
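A sketch of a typical configuration, assuming this Optimise module's submodules are in scope; it only uses constructors that appear elsewhere in this diff, and the concrete values are illustrative:

let params =
  Params.config
    ~batch:(Batch.Mini 128)
    ~loss:Loss.Cross_entropy
    ~learning_rate:(Learning_Rate.Adagrad 0.005)
    ~momentum:(Momentum.Standard 0.9)
    ~checkpoint:(Checkpoint.Epoch 1.)
    ~verbosity:true
    10.                                      (* train for 10 epochs *)

Fields not passed to config (gradient, regularisation, clipping and stopping here) presumably fall back to the same defaults as default ().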

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Regularisation/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Regularisation/index.html deleted file mode 100644 index 4b4f616cb..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Regularisation/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Regularisation (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Regularisation)

Module Optimise.Regularisation

Regularisation module

type typ =
  1. | L1norm of float
  2. | L2norm of float
  3. | Elastic_net of float * float
  4. | None

Types of regularisation functions.

val run : typ -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Stopping/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Stopping/index.html deleted file mode 100644 index cfbd86a96..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Stopping/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Stopping (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Stopping)

Module Optimise.Stopping

Stopping module

type typ =
  1. | Const of float
  2. | Early of int * int
  3. | None

Types of stopping functions.

val run : typ -> float -> bool

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Utils/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Utils/index.html deleted file mode 100644 index 434ec0450..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/Utils/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Utils (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise.Utils)

Module Optimise.Utils

Utils module

val sample_num : Algodiff.t -> int

Return the total number of samples in the passed-in ndarray.

val draw_samples : Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t

draw_samples x y draws samples from both x (observations) and y (labels). The samples are drawn along axis 0, so x and y must agree along axis 0.

val get_chunk : Algodiff.t -> Algodiff.t -> int -> int -> Algodiff.t * Algodiff.t

get_chunk x y i c gets a contiguous chunk of c samples starting at position i from x (observations) and y (labels).

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/index.html deleted file mode 100644 index d5f65bfd2..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Optimise/index.html +++ /dev/null @@ -1,31 +0,0 @@ - -Optimise (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Optimise)

Module Neuron.Optimise

module Utils : sig ... end

Utils module

module Learning_Rate : sig ... end

Strategies for learning rate update

module Batch : sig ... end

Batch module

module Loss : sig ... end

Loss module

module Gradient : sig ... end

Gradient module

module Momentum : sig ... end

Momentum module

module Regularisation : sig ... end

Regularisation module

module Clipping : sig ... end

Clipping module

module Stopping : sig ... end

Stopping module

module Checkpoint : sig ... end

Checkpoint module

module Params : sig ... end

Params module

Core functions
val minimise_weight : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Checkpoint.state * Algodiff.t

This function minimises the weight w of the passed-in function f.

  • f is a function f : w -> x -> y.
  • w is a row vector but y can have any shape.

val minimise_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t * Algodiff.t array array) -> (Algodiff.t -> Algodiff.t array array * Algodiff.t array array) -> (Algodiff.t array array -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state

This function is specifically designed for minimising the weights in a neural network of graph structure. In Owl's earlier versions, the functions in the regression module were actually implemented using this function.

val minimise_fun : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Checkpoint.state * Algodiff.t

This function minimises f : x -> y w.r.t. x.

x is an ndarray and y is a scalar value.
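A sketch of minimise_fun on a toy objective, assuming this Optimise module is in scope: minimising the squared L2 norm should drive x towards the zero ndarray.

let () =
  let p = Params.config ~learning_rate:(Learning_Rate.Const 0.1) 100. in
  let f x = Algodiff.Maths.l2norm_sqr' x in
  let _state, x_min = minimise_fun p f (Algodiff.Arr.uniform [| 10 |]) in
  ignore x_min                               (* x_min should be close to zero *)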

val minimise_compiled_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> (unit -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state

TODO

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Padding1D/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Padding1D/index.html deleted file mode 100644 index f8ad58ab1..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Padding1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Padding1D (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Padding1D)

Module Neuron.Padding1D

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Padding2D/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Padding2D/index.html deleted file mode 100644 index e28946a84..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Padding2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Padding2D (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Padding2D)

Module Neuron.Padding2D

type neuron_typ = {
  1. mutable padding : int array array;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : int array array -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Padding3D/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Padding3D/index.html deleted file mode 100644 index b08a43529..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Padding3D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Padding3D (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Padding3D)

Module Neuron.Padding3D

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Recurrent/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Recurrent/index.html deleted file mode 100644 index 96528d85e..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Recurrent/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -Recurrent (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Recurrent)

Module Neuron.Recurrent

type neuron_typ = {
  1. mutable whh : Optimise.Algodiff.t;
  2. mutable wxh : Optimise.Algodiff.t;
  3. mutable why : Optimise.Algodiff.t;
  4. mutable bh : Optimise.Algodiff.t;
  5. mutable by : Optimise.Algodiff.t;
  6. mutable h : Optimise.Algodiff.t;
  7. mutable hiddens : int;
  8. mutable act : Activation.typ;
  9. mutable init_typ : Init.typ;
  10. mutable in_shape : int array;
  11. mutable out_shape : int array;
}

Neuron type definition.

val create : ?time_steps:int -> ?inputs:int -> int -> int -> Activation.typ -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Reshape/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Reshape/index.html deleted file mode 100644 index c4c43b007..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Reshape/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Reshape (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Reshape)

Module Neuron.Reshape

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> int array -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Slice/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Slice/index.html deleted file mode 100644 index 06cb295a7..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/Slice/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Slice (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.Slice)

Module Neuron.Slice

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
  3. mutable slice : int list list;
}

Neuron type definition.

val create : int list list -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/TransposeConv1D/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/TransposeConv1D/index.html deleted file mode 100644 index cf0d07e54..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/TransposeConv1D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -TransposeConv1D (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.TransposeConv1D)

Module Neuron.TransposeConv1D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/TransposeConv2D/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/TransposeConv2D/index.html deleted file mode 100644 index dbc479093..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/TransposeConv2D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -TransposeConv2D (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.TransposeConv2D)

Module Neuron.TransposeConv2D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/TransposeConv3D/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/TransposeConv3D/index.html deleted file mode 100644 index 101b2f3c0..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/TransposeConv3D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -TransposeConv3D (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.TransposeConv3D)

Module Neuron.TransposeConv3D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/UpSampling1D/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/UpSampling1D/index.html deleted file mode 100644 index 794819850..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/UpSampling1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -UpSampling1D (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.UpSampling1D)

Module Neuron.UpSampling1D

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/UpSampling2D/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/UpSampling2D/index.html deleted file mode 100644 index d70f6daa9..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/UpSampling2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -UpSampling2D (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.UpSampling2D)

Module Neuron.UpSampling2D

type neuron_typ = {
  1. mutable size : int array;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : int array -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/UpSampling3D/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/UpSampling3D/index.html deleted file mode 100644 index 376654721..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/UpSampling3D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -UpSampling3D (owl-base.Owl_neural_generic.Flatten.Graph.Neuron.UpSampling3D)

Module Neuron.UpSampling3D

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/index.html deleted file mode 100644 index 8b89afbc2..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/Neuron/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Neuron (owl-base.Owl_neural_generic.Flatten.Graph.Neuron)

Module Graph.Neuron

Init neuron
module Init : sig ... end
Input neuron
module Input : sig ... end
Activation neuron
module Activation : sig ... end
Linear neuron
module Linear : sig ... end
LinearNoBias neuron
module LinearNoBias : sig ... end
Recurrent neuron
module Recurrent : sig ... end
LSTM neuron
module LSTM : sig ... end
GRU neuron
module GRU : sig ... end
Conv1D neuron
module Conv1D : sig ... end
Conv2D neuron
module Conv2D : sig ... end
Conv3D neuron
module Conv3D : sig ... end
DilatedConv1D neuron
module DilatedConv1D : sig ... end
DilatedConv2D neuron
module DilatedConv2D : sig ... end
DilatedConv3D neuron
module DilatedConv3D : sig ... end
TransposeConv1D neuron
module TransposeConv1D : sig ... end
TransposeConv2D neuron
module TransposeConv2D : sig ... end
TransposeConv3D neuron
module TransposeConv3D : sig ... end
FullyConnected neuron
module FullyConnected : sig ... end
MaxPool1D neuron
module MaxPool1D : sig ... end
MaxPool2D neuron
module MaxPool2D : sig ... end
AvgPool1D neuron
module AvgPool1D : sig ... end
AvgPool2D neuron
module AvgPool2D : sig ... end
GlobalMaxPool1D neuron
module GlobalMaxPool1D : sig ... end
GlobalMaxPool2D neuron
module GlobalMaxPool2D : sig ... end
GlobalAvgPool1D neuron
module GlobalAvgPool1D : sig ... end
GlobalAvgPool2D neuron
module GlobalAvgPool2D : sig ... end
UpSampling1D neuron
module UpSampling1D : sig ... end
UpSampling2D neuron
module UpSampling2D : sig ... end
UpSampling3D neuron
module UpSampling3D : sig ... end
Padding1D neuron
module Padding1D : sig ... end
Padding2D neuron
module Padding2D : sig ... end
Padding3D neuron
module Padding3D : sig ... end
Lambda neuron
module Lambda : sig ... end
LambdaArray neuron
module LambdaArray : sig ... end
Dropout neuron
module Dropout : sig ... end
Reshape neuron
module Reshape : sig ... end
Flatten neuron
module Flatten : sig ... end
Slice neuron
module Slice : sig ... end
Add neuron
module Add : sig ... end
Mul neuron
module Mul : sig ... end
Dot neuron
module Dot : sig ... end
Max neuron
module Max : sig ... end
Average neuron
module Average : sig ... end
Concatenate neuron
module Concatenate : sig ... end
Normalisation neuron
module Normalisation : sig ... end
GaussianNoise neuron
module GaussianNoise : sig ... end
GaussianDropout neuron
module GaussianDropout : sig ... end
AlphaDropout neuron
module AlphaDropout : sig ... end
Embedding neuron
module Embedding : sig ... end
Masking neuron
module Masking : sig ... end
Core functions
type neuron =
  1. | Input of Input.neuron_typ
  2. | Linear of Linear.neuron_typ
  3. | LinearNoBias of LinearNoBias.neuron_typ
  4. | Embedding of Embedding.neuron_typ
  5. | LSTM of LSTM.neuron_typ
  6. | GRU of GRU.neuron_typ
  7. | Recurrent of Recurrent.neuron_typ
  8. | Conv1D of Conv1D.neuron_typ
  9. | Conv2D of Conv2D.neuron_typ
  10. | Conv3D of Conv3D.neuron_typ
  11. | DilatedConv1D of DilatedConv1D.neuron_typ
  12. | DilatedConv2D of DilatedConv2D.neuron_typ
  13. | DilatedConv3D of DilatedConv3D.neuron_typ
  14. | TransposeConv1D of TransposeConv1D.neuron_typ
  15. | TransposeConv2D of TransposeConv2D.neuron_typ
  16. | TransposeConv3D of TransposeConv3D.neuron_typ
  17. | FullyConnected of FullyConnected.neuron_typ
  18. | MaxPool1D of MaxPool1D.neuron_typ
  19. | MaxPool2D of MaxPool2D.neuron_typ
  20. | AvgPool1D of AvgPool1D.neuron_typ
  21. | AvgPool2D of AvgPool2D.neuron_typ
  22. | GlobalMaxPool1D of GlobalMaxPool1D.neuron_typ
  23. | GlobalMaxPool2D of GlobalMaxPool2D.neuron_typ
  24. | GlobalAvgPool1D of GlobalAvgPool1D.neuron_typ
  25. | GlobalAvgPool2D of GlobalAvgPool2D.neuron_typ
  26. | UpSampling2D of UpSampling2D.neuron_typ
  27. | Padding2D of Padding2D.neuron_typ
  28. | Dropout of Dropout.neuron_typ
  29. | Reshape of Reshape.neuron_typ
  30. | Flatten of Flatten.neuron_typ
  31. | Slice of Slice.neuron_typ
  32. | Lambda of Lambda.neuron_typ
  33. | LambdaArray of LambdaArray.neuron_typ
  34. | Activation of Activation.neuron_typ
  35. | GaussianNoise of GaussianNoise.neuron_typ
  36. | GaussianDropout of GaussianDropout.neuron_typ
  37. | AlphaDropout of AlphaDropout.neuron_typ
  38. | Normalisation of Normalisation.neuron_typ
  39. | Add of Add.neuron_typ
  40. | Mul of Mul.neuron_typ
  41. | Dot of Dot.neuron_typ
  42. | Max of Max.neuron_typ
  43. | Average of Average.neuron_typ
  44. | Concatenate of Concatenate.neuron_typ

Types of neuron.

val get_in_out_shape : neuron -> int array * int array

Get both input and output shapes of a neuron.

val get_in_shape : neuron -> int array

Get the input shape of a neuron.

val get_out_shape : neuron -> int array

Get the output shape of a neuron.

val connect : int array array -> neuron -> unit

Connect this neuron to others in a neural network.

val init : neuron -> unit

Initialise the neuron and its parameters.

val reset : neuron -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron -> Optimise.Algodiff.t array

Assemble all the trainable parameters in an array, used by Optimise module.

val mkpri : neuron -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron -> Optimise.Algodiff.t array -> unit

Update trainable parameters in a neuron, used by Optimise module.

val load_weights : neuron -> Optimise.Algodiff.t array -> unit

Load both trainable and non-trainable parameters into the neuron.

val save_weights : neuron -> Optimise.Algodiff.t array

Assemble both trainable and non-trainable parameters of the neuron.

val copy : neuron -> neuron

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : neuron -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/index.html b/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/index.html deleted file mode 100644 index 96abb3e93..000000000 --- a/owl-base/Owl_neural_generic/Flatten/argument-1-Graph/index.html +++ /dev/null @@ -1,243 +0,0 @@ - -Graph (owl-base.Owl_neural_generic.Flatten.Graph)

Parameter Flatten.Graph

Type definition
type node = {
  1. mutable name : string;
  2. mutable prev : node array;
  3. mutable next : node array;
  4. mutable neuron : Neuron.neuron;
  5. mutable output : Neuron.Optimise.Algodiff.t option;
  6. mutable network : network;
  7. mutable train : bool;
}
and network = {
  1. mutable nnid : string;
  2. mutable size : int;
  3. mutable roots : node array;
  4. mutable outputs : node array;
  5. mutable topo : node array;
}

Type definition of a node and a neural network.

Manipulate networks
val make_network : ?nnid:string -> int -> node array -> node array -> network

Create an empty neural network.

val make_node : ?name:string -> ?train:bool -> node array -> node array -> Neuron.neuron -> Neuron.Optimise.Algodiff.t option -> network -> node

Create a node in a neural network.

val get_roots : network -> node array

Get the roots of the neural network.

val get_outputs : network -> node array

Get the outputs of the neural network.

val get_node : network -> string -> node

Get a node in a network with the given name.

val get_network : ?name:string -> node -> network

Get the neural network that the given node is associated with.

val outputs : ?name:string -> node array -> network

Get the neural network associated with the given output nodes.

val get_network_name : network -> string

get_network_name n returns the name of the network n.

val set_network_name : network -> string -> unit

set_network_name n s sets the name of the network n to s.

val collect_output : node array -> Neuron.Optimise.Algodiff.t array

Collect the output values of given nodes.

val connect_pair : node -> node -> unit

Connect two nodes in a neural network.

val connect_to_parents : node array -> node -> unit

Connect a node to a list of parents.

val add_node : ?act_typ:Neuron.Activation.typ -> network -> node array -> node -> node

Add a node to the given network.

val input_shape : network -> int array

Get input shape of a network (without batch dimension), i.e. shape of input neuron.

val input_shapes : network -> int array array

Get input shapes of a network (without batch dimension), i.e. shape of input neurons.

Interface to optimisation engine
val init : network -> unit

Initialise the network.

val reset : network -> unit

Reset the network, i.e. all the parameters in the neurons.

val mktag : int -> network -> unit

Tag the neurons, used by Algodiff module.

val mkpar : network -> Neuron.Optimise.Algodiff.t array array

Collect the parameters of neurons, used by Optimise module.

val mkpri : network -> Neuron.Optimise.Algodiff.t array array

Collect the primal values of neurons, used by Optimise module.

val mkadj : network -> Neuron.Optimise.Algodiff.t array array

Collect the adjoint values of neurons, used by Optimise module.

val update : network -> Neuron.Optimise.Algodiff.t array array -> unit

Update the parameters of neurons, used by Optimise module.

Execute the computations in all the neurons in a network with the given input.

val run_inputs : Neuron.Optimise.Algodiff.t array -> network -> Neuron.Optimise.Algodiff.t array

Execute the computations in all the neurons in a network with the given inputs.

Run the forward pass of a network.

val forward_inputs : network -> Neuron.Optimise.Algodiff.t array -> Neuron.Optimise.Algodiff.t array * Neuron.Optimise.Algodiff.t array array

Run the forward pass of a network (multi-input/output version).

val backward : network -> Neuron.Optimise.Algodiff.t -> Neuron.Optimise.Algodiff.t array array * Neuron.Optimise.Algodiff.t array array

Run the backward pass of a network.

val copy : network -> network

Make a deep copy of the given network.

Make a deep copy of the given network, excluding the neurons marked with training = true.

val model_inputs : network -> Neuron.Optimise.Algodiff.A.arr array -> Neuron.Optimise.Algodiff.A.arr array

Make a deep copy of the given network, excluding the neurons marked with training = true.

Create Neurons
val input : ?name:string -> int array -> node

input shape creates an input node for input data. Note that if your network has multiple inputs, you should use inputs instead.

Arguments: * shape: shape of input data.

val inputs : ?names:string array -> int array array -> node array

inputs shapes creates an array of input nodes for input data.

Arguments: * shapes: array of shapes of input data.
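
A small sketch of creating input nodes, assuming the single-precision instantiation Owl.Neural.S.Graph from the owl package (the shapes and names are arbitrary):

  open Owl.Neural.S.Graph
  (* one input holding 28x28x1 images; the batch dimension is omitted *)
  let x = input ~name:"img" [|28; 28; 1|]
  (* two inputs of different shapes for a multi-input network *)
  let xs = inputs ~names:[|"a"; "b"|] [| [|10|]; [|20|] |]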

val activation : ?name:string -> Neuron.Activation.typ -> node -> node

Applies an activation function to an output.

Arguments: * activation: name of activation function to use.

val linear : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> node -> node

linear ?act_typ units node adds the regular densely-connected NN node to node.

Arguments: * units: Positive integer, dimensionality of the output space. * act_typ: Activation function to use.
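
For example, a minimal multilayer-perceptron sketch built from input, linear and get_network (assuming Owl.Neural.S.Graph as above; the 784/300/10 sizes are arbitrary):

  open Owl.Neural.S.Graph
  let nn =
    input [|784|]
    |> linear 300 ~act_typ:Neuron.Activation.Tanh
    |> linear 10 ~act_typ:(Neuron.Activation.Softmax 1)
    |> get_network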

val linear_nobias : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> node -> node

Similar to linear, but does not use the bias vector.

val embedding : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> int -> node -> node

Create a node for embedding neuron.

val recurrent : ?name:string -> ?init_typ:Neuron.Init.typ -> act_typ:Neuron.Activation.typ -> int -> int -> node -> node

Create a node for recurrent neuron.

val lstm : ?name:string -> ?init_typ:Neuron.Init.typ -> int -> node -> node

lstm units node adds a LSTM node on previous node.

Arguments: * units: Positive integer, dimensionality of the output space.

val gru : ?name:string -> ?init_typ:Neuron.Init.typ -> int -> node -> node

gru units node adds a Gated Recurrent Unit node on previous node.

Arguments: * units: Positive integer, dimensionality of the output space.
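
As an illustration, a character-level model sketch that stacks embedding and lstm before a softmax output (assuming Owl.Neural.S.Graph as above; wndsz and vocabsz are placeholder sizes):

  open Owl.Neural.S.Graph
  let wndsz = 100 and vocabsz = 65   (* hypothetical window and vocabulary sizes *)
  let nn =
    input [|wndsz|]
    |> embedding vocabsz 40
    |> lstm 128
    |> linear vocabsz ~act_typ:(Neuron.Activation.Softmax 1)
    |> get_network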

val conv1d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node

conv1d kernel stride node adds a 1D convolution node (e.g. temporal convolution) on previous node.

Arguments: * kernel: int array consists of h, i, o. h specifies the dimension of the 1D convolution window. i and o are the dimensionalities of the input and output space. * stride: int array of 1 integer.

val conv2d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node

conv2d kernel stride node adds a 2D convolution node (e.g. spatial convolution over images) on previous node.

Arguments: * kernel: int array consists of w, h, i, o. w and h specify the width and height of the 2D convolution window. i and o are the dimensionality of the input and output space. * stride: int array of 2 integers.
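
For instance, a small image-classifier sketch combining conv2d with the pooling, flatten and linear nodes described elsewhere in this module (assuming Owl.Neural.S.Graph as above; kernel and layer sizes are arbitrary):

  open Owl.Neural.S.Graph
  let nn =
    input [|28; 28; 1|]
    |> conv2d [|5; 5; 1; 32|] [|1; 1|] ~act_typ:Neuron.Activation.Relu
    |> max_pool2d [|2; 2|] [|2; 2|]
    |> flatten
    |> linear 10 ~act_typ:(Neuron.Activation.Softmax 1)
    |> get_network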

val conv3d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node

conv3d kernel stride node adds a 3D convolution node (e.g. spatial convolution over volumes) on previous node.

Arguments: * kernel: int array consists of w, h, d, i, o. w, h, and d specify the 3 dimensionality of the 3D convolution window. i and o are the dimensionality of the input and output space. * stride: int array of 3 integers.

val dilated_conv1d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> int array -> node -> node

dilated_conv1d kernel stride rate node adds a 1D dilated convolution node (e.g. temporal convolution) on previous node.

Arguments: * kernel: int array consists of h, i, o. h specifies the dimension of the 1D convolution window. i and o are the dimensionalities of the input and output space. * stride: int array of 1 integer. * rate: int array of 1 integer.

val dilated_conv2d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> int array -> node -> node

dilated_conv2d kernel stride rate node adds a 2D dilated convolution node (e.g. spatial convolution over images) on previous node.

Arguments: * kernel: int array consists of w, h, i, o. w and h specify the width and height of the 2D convolution window. i and o are the dimensionality of the input and output space. * stride: int array of 2 integers. * rate: int array of 2 integers.

val dilated_conv3d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> int array -> node -> node

dilated_conv3d kernel stride rate node adds a 3D dilated convolution node (e.g. spatial convolution over volumes) on previous node.

Arguments: * kernel: int array consists of w, h, d, i, o. w, h, and d specify the 3 dimensionality of the 3D convolution window. i and o are the dimensionality of the input and output space. * stride: int array of 3 integers. * rate: int array of 3 integers.

val transpose_conv1d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node

transpose_conv1d kernel stride node adds a 1D transpose convolution node (e.g. temporal convolution) on previous node.

Arguments: * kernel: int array consists of h, i, o. h specifies the dimension of the 1D convolution window. i and o are the dimensionalities of the input and output space. * stride: int array of 1 integer.

val transpose_conv2d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node

transpose_conv2d kernel stride node adds a 2D transpose convolution node on previous node.

Arguments: * kernel: int array consists of w, h, i, o. w and h specify the width and height of the 2D convolution window. i and o are the dimensionality of the input and output space. * stride: int array of 2 integers.

val transpose_conv3d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node

transpose_conv3d kernel stride node adds a 3D transpose convolution node (e.g. spatial convolution over volumes) on previous node.

Arguments: * kernel: int array consists of w, h, d, i, o. w, h, and d specify the 3 dimensionality of the 3D convolution window. i and o are the dimensionality of the input and output space. * stride: int array of 3 integers.

val fully_connected : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> node -> node

fully_connected outputs node adds a fully connected node to node.

Arguments: * outputs: integer, the number of output units in the node.

val max_pool1d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node

max_pool1d ~padding ~act_typ pool_size stride node adds a max pooling operation for temporal data to node.

Arguments: * pool_size: Array of one integer, size of the max pooling windows. * stride: Array of one integer, factor by which to downscale.

val max_pool2d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node

max_pool2d ~padding ~act_typ pool_size stride node adds a max pooling operation for spatial data to node.

Arguments: * pool_size: Array of 2 integers, size of the max pooling windows. * stride: Array of 2 integers, factor by which to downscale.

val avg_pool1d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node

avg_pool1d ~padding ~act_typ pool_size stride node adds an average pooling operation for temporal data to node.

Arguments: * pool_size: Array of one integer, size of the average pooling windows. * stride: Array of one integer, factor by which to downscale.

val avg_pool2d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node

avg_pool2d ~padding ~act_typ pool_size stride node adds an average pooling operation for spatial data to node.

Arguments: * pool_size: Array of 2 integers, size of the average pooling windows. * stride: Array of 2 integers, factor by which to downscale.

val global_max_pool1d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node

global_max_pool1d adds global max pooling operation for temporal data.

val global_max_pool2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node

global_max_pool2d adds a global max pooling operation for spatial data.

val global_avg_pool1d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node

global_avg_pool1d adds global average pooling operation for temporal data.

val global_avg_pool2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node

global_avg_pool2d adds a global average pooling operation for spatial data.

val upsampling2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> int array -> node -> node

upsampling2d ~act_typ size node adds an upsampling operation for spatial data to node.

Arguments: * size: array of two integers, namely the upsampling factors for columns and rows.

val padding2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> int array array -> node -> node

padding2d ~act_typ padding node adds rows and columns of zeros at the top, bottom, left and right side of an image tensor.

Arguments: * padding: array of 2 arrays of 2 integers, interpreted as [| [|top_pad; bottom_pad|]; [|left_pad; right_pad|] |].

val dropout : ?name:string -> float -> node -> node

dropout rate node applies Dropout to the input to prevent overfitting.

Arguments: * rate: float between 0 and 1. Fraction of the input units to drop.
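
A sketch of inserting a dropout node between two fully-connected layers (assuming Owl.Neural.S.Graph as above; all sizes are arbitrary):

  open Owl.Neural.S.Graph
  let nn =
    input [|784|]
    |> linear 256 ~act_typ:Neuron.Activation.Relu
    |> dropout 0.5   (* randomly drops half of the units during training *)
    |> linear 10 ~act_typ:(Neuron.Activation.Softmax 1)
    |> get_network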

val gaussian_noise : ?name:string -> float -> node -> node

gaussian_noise stddev node applies additive zero-centered Gaussian noise.

Arguments: * stddev: float, standard deviation of the noise distribution.

val gaussian_dropout : ?name:string -> float -> node -> node

gaussian_dropout rate node applies multiplicative 1-centered Gaussian noise. Only active at training time.

Arguments: * rate: float, drop probability.

val alpha_dropout : ?name:string -> float -> node -> node

alpha_dropout rate node applies Alpha Dropout to the input node. Only active at training time.

Arguments: * rate: float, drop probability.

val normalisation : ?name:string -> ?axis:int -> ?training:bool -> ?decay:float -> ?mu:Neuron.Optimise.Algodiff.A.arr -> ?var:Neuron.Optimise.Algodiff.A.arr -> node -> node

normalisation axis node normalises the activations of the previous node at each batch.

Arguments: * axis: Integer, the axis that should be normalised (typically the features axis). Default value is 0.

val reshape : ?name:string -> int array -> node -> node

reshape target_shape node reshapes an output to a certain shape.

Arguments: * target_shape: target shape. Array of integers. Does not include the batch axis.

val flatten : ?name:string -> node -> node

flatten node flattens the input. Does not affect the batch size.

val slice : ?name:string -> int list list -> node -> node

slice node slices the input. Does not affect the batch size.

val lambda : ?name:string -> ?act_typ:Neuron.Activation.typ -> ?out_shape:int array -> (Neuron.Optimise.Algodiff.t -> Neuron.Optimise.Algodiff.t) -> node -> node

lambda ?target_shape func node wraps an arbitrary expression as a Node object.

Arguments: * func: The function to be evaluated. Takes input tensor as first argument. * target_shape: the shape of the tensor returned by func; set to the same as input shape if not specified.
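
As an example, a sketch wrapping an element-wise squaring function in a lambda node (assuming Owl.Neural.S.Graph as above; out_shape is omitted because squaring keeps the input shape):

  open Owl.Neural.S.Graph
  module AD = Neuron.Optimise.Algodiff
  let nn =
    input [|10|]
    |> lambda (fun x -> AD.Maths.(x * x))
    |> get_network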

val lambda_array : ?name:string -> ?act_typ:Neuron.Activation.typ -> int array -> (Neuron.Optimise.Algodiff.t array -> Neuron.Optimise.Algodiff.t) -> node array -> node

lambda_array target_shape func node wraps an arbitrary expression as a Node object.

Arguments: * target_shape: the shape of the tensor returned by func. * func: The function to be evaluated. Takes input tensor array as first argument.

val add : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node

Node that adds a list of inputs.

It takes as input an array of nodes, all of the same shape, and returns a single node (also of the same shape).
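
For instance, a residual-style block sketch that adds a transformed branch back onto its input (assuming Owl.Neural.S.Graph as above; the width 32 is arbitrary):

  open Owl.Neural.S.Graph
  let x = input [|32|]
  let branch = x |> linear 32 ~act_typ:Neuron.Activation.Relu |> linear 32
  let y = add [|x; branch|]   (* both nodes have shape [|32|] *)
  let nn = get_network y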

val mul : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node

Node that multiplies (element-wise) a list of inputs.

It takes as input an array of nodes, all of the same shape, and returns a single node (also of the same shape).

val dot : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node

Node that computes a dot product between samples in two nodes.

val max : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node

Node that computes the element-wise maximum of a list of inputs.

val average : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node

Node that averages a list of inputs.

It takes as input an array of nodes, all of the same shape, and returns a single node (also of the same shape).

val concatenate : ?name:string -> ?act_typ:Neuron.Activation.typ -> int -> node array -> node

concatenate axis nodes concatenates an array of nodes and returns a single node.

Arguments: * axis: Axis along which to concatenate.
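
A sketch of merging two branches along the feature axis with concatenate (assuming Owl.Neural.S.Graph as above; axis 1 is used on the assumption that axis 0 is the batch dimension):

  open Owl.Neural.S.Graph
  let x = input [|16|]
  let a = x |> linear 8 ~act_typ:Neuron.Activation.Relu
  let b = x |> linear 4 ~act_typ:Neuron.Activation.Relu
  let y = concatenate 1 [|a; b|]   (* concatenated feature size: 8 + 4 = 12 *)
  let nn = get_network y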

Helper functions
val to_string : network -> string

Convert a neural network to its string representation.

val pp_network : Stdlib.Format.formatter -> network -> unit

Pretty-printing function for a neural network.

val print : network -> unit

Print the string representation of a neural network to the standard output.

val save : ?unsafe:bool -> network -> string -> unit

Serialise a network and save it to a file with the given name. Set the unsafe flag to true if the network contains a Lambda layer.

val load : string -> network

Load the neural network from a file with the given name.

val save_weights : network -> string -> unit

Save all the weights in a neural network to a file. The weights and the name of their associated neurons are saved as key-value pairs in a hash table.

val load_weights : network -> string -> unit

Load the weights from a file of the given name. Note that the weights and the name of their associated neurons are saved as key-value pairs in a hash table.
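
For example, a sketch of persisting weights and restoring them into a freshly built copy of the same architecture (assuming Owl.Neural.S.Graph as above; the file name is arbitrary):

  open Owl.Neural.S.Graph
  let make () = input [|4|] |> linear 2 |> get_network
  let nn  = make ()   (* imagine this network has been trained *)
  let nn' = make ()   (* a fresh network with the same architecture *)
  let () =
    init nn; init nn';
    save_weights nn "model.weights";
    load_weights nn' "model.weights"   (* nn' now carries nn's parameters *)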

val make_subnetwork : ?copy:bool -> ?make_inputs:string array -> network -> string array -> network

make_subnetwork ?copy ?make_inputs network output_names constructs a subnetwork of the nodes on which output_names depend, replacing nodes whose names appear in make_inputs with input nodes.

Arguments: * copy: Whether to copy or reference the original node weights. Defaults to true. * make_inputs: Names of nodes to use as inputs to the subnetwork. Defaults to [||], which uses the original inputs. * nn: The neural network from which the subnetwork is constructed. * output_names: Names of nodes to use as outputs.
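
As a sketch, the snippet below builds a three-layer network with named nodes and then keeps only the classifier head, turning the hidden node "feat" into a fresh input (assuming Owl.Neural.S.Graph as above; all node names are made up for illustration):

  open Owl.Neural.S.Graph
  let nn =
    input ~name:"raw" [|8|]
    |> linear 16 ~name:"feat" ~act_typ:Neuron.Activation.Relu
    |> linear 2 ~name:"out"
    |> get_network
  (* treat "feat" as a new input and keep everything "out" depends on *)
  let head = make_subnetwork ~copy:true ~make_inputs:[|"feat"|] nn [|"out"|]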

Train Networks

Generic function for training a neural network.

Train a neural network with various configurations.

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Flatten/index.html b/owl-base/Owl_neural_generic/Flatten/index.html deleted file mode 100644 index 119677bcc..000000000 --- a/owl-base/Owl_neural_generic/Flatten/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Flatten (owl-base.Owl_neural_generic.Flatten)

Module Owl_neural_generic.Flatten

Parameters

Signature

module Graph = Graph
module Optimise = Graph.Neuron.Optimise
module Init = Graph.Neuron.Init
module Activation = Graph.Neuron.Activation
module Regularisation = Graph.Neuron.Optimise.Regularisation
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Activation/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Activation/index.html deleted file mode 100644 index 7b8438a77..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Activation/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Activation (owl-base.Owl_neural_generic.Make.Graph.Neuron.Activation)

Module Neuron.Activation

type typ = Make_Embedded(A).Neuron.Activation.typ =
  1. | Elu
  2. | Relu
  3. | Sigmoid
  4. | HardSigmoid
  5. | Softmax of int
  6. | Softplus
  7. | Softsign
  8. | Tanh
  9. | Relu6
  10. | LeakyRelu of float
  11. | TRelu of float
  12. | Custom of Optimise.Algodiff.t -> Optimise.Algodiff.t
  13. | None
type neuron_typ = Make_Embedded(A).Neuron.Activation.neuron_typ = {
  1. mutable activation : typ;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val run_activation : Optimise.Algodiff.t -> typ -> Optimise.Algodiff.t
val copy : neuron_typ -> neuron_typ
val activation_to_string : typ -> string
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Add/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Add/index.html deleted file mode 100644 index ed4786fe7..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Add/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Add (owl-base.Owl_neural_generic.Make.Graph.Neuron.Add)

Module Neuron.Add

type neuron_typ = Make_Embedded(A).Neuron.Add.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/AlphaDropout/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/AlphaDropout/index.html deleted file mode 100644 index a0927ae1b..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/AlphaDropout/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -AlphaDropout (owl-base.Owl_neural_generic.Make.Graph.Neuron.AlphaDropout)

Module Neuron.AlphaDropout

type neuron_typ = Make_Embedded(A).Neuron.AlphaDropout.neuron_typ = {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : float -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Average/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Average/index.html deleted file mode 100644 index b1be88e0c..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Average/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Average (owl-base.Owl_neural_generic.Make.Graph.Neuron.Average)

Module Neuron.Average

type neuron_typ = Make_Embedded(A).Neuron.Average.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/AvgPool1D/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/AvgPool1D/index.html deleted file mode 100644 index 4371c2db3..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/AvgPool1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -AvgPool1D (owl-base.Owl_neural_generic.Make.Graph.Neuron.AvgPool1D)

Module Neuron.AvgPool1D

type neuron_typ = Make_Embedded(A).Neuron.AvgPool1D.neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : Owl_types.padding -> int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/AvgPool2D/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/AvgPool2D/index.html deleted file mode 100644 index ca7ac8f26..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/AvgPool2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -AvgPool2D (owl-base.Owl_neural_generic.Make.Graph.Neuron.AvgPool2D)

Module Neuron.AvgPool2D

type neuron_typ = Make_Embedded(A).Neuron.AvgPool2D.neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : Owl_types.padding -> int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Concatenate/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Concatenate/index.html deleted file mode 100644 index 9f77fa4cb..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Concatenate/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Concatenate (owl-base.Owl_neural_generic.Make.Graph.Neuron.Concatenate)

Module Neuron.Concatenate

type neuron_typ = Make_Embedded(A).Neuron.Concatenate.neuron_typ = {
  1. mutable axis : int;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : int -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Conv1D/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Conv1D/index.html deleted file mode 100644 index 3d76e40d2..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Conv1D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -Conv1D (owl-base.Owl_neural_generic.Make.Graph.Neuron.Conv1D)

Module Neuron.Conv1D

type neuron_typ = Make_Embedded(A).Neuron.Conv1D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Conv2D/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Conv2D/index.html deleted file mode 100644 index 99633dd95..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Conv2D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -Conv2D (owl-base.Owl_neural_generic.Make.Graph.Neuron.Conv2D)

Module Neuron.Conv2D

type neuron_typ = Make_Embedded(A).Neuron.Conv2D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Conv3D/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Conv3D/index.html deleted file mode 100644 index 4d85ad202..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Conv3D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -Conv3D (owl-base.Owl_neural_generic.Make.Graph.Neuron.Conv3D)

Module Neuron.Conv3D

type neuron_typ = Make_Embedded(A).Neuron.Conv3D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/DilatedConv1D/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/DilatedConv1D/index.html deleted file mode 100644 index 315c56cf0..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/DilatedConv1D/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -DilatedConv1D (owl-base.Owl_neural_generic.Make.Graph.Neuron.DilatedConv1D)

Module Neuron.DilatedConv1D

type neuron_typ = Make_Embedded(A).Neuron.DilatedConv1D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/DilatedConv2D/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/DilatedConv2D/index.html deleted file mode 100644 index 9e5108dce..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/DilatedConv2D/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -DilatedConv2D (owl-base.Owl_neural_generic.Make.Graph.Neuron.DilatedConv2D)

Module Neuron.DilatedConv2D

type neuron_typ = Make_Embedded(A).Neuron.DilatedConv2D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/DilatedConv3D/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/DilatedConv3D/index.html deleted file mode 100644 index 844e06fd4..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/DilatedConv3D/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -DilatedConv3D (owl-base.Owl_neural_generic.Make.Graph.Neuron.DilatedConv3D)

Module Neuron.DilatedConv3D

type neuron_typ = Make_Embedded(A).Neuron.DilatedConv3D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Dot/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Dot/index.html deleted file mode 100644 index becf5b611..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Dot/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Dot (owl-base.Owl_neural_generic.Make.Graph.Neuron.Dot)

Module Neuron.Dot

type neuron_typ = Make_Embedded(A).Neuron.Dot.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Dropout/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Dropout/index.html deleted file mode 100644 index 772455ea3..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Dropout/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Dropout (owl-base.Owl_neural_generic.Make.Graph.Neuron.Dropout)

Module Neuron.Dropout

type neuron_typ = Make_Embedded(A).Neuron.Dropout.neuron_typ = {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : float -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Embedding/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Embedding/index.html deleted file mode 100644 index 7011f6cfd..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Embedding/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Embedding (owl-base.Owl_neural_generic.Make.Graph.Neuron.Embedding)

Module Neuron.Embedding

type neuron_typ = Make_Embedded(A).Neuron.Embedding.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable init_typ : Init.typ;
  3. mutable in_dim : int;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : ?inputs:int -> int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Flatten/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Flatten/index.html deleted file mode 100644 index b12f2eb9f..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Flatten/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Flatten (owl-base.Owl_neural_generic.Make.Graph.Neuron.Flatten)

Module Neuron.Flatten

type neuron_typ = Make_Embedded(A).Neuron.Flatten.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/FullyConnected/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/FullyConnected/index.html deleted file mode 100644 index ccc48609a..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/FullyConnected/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -FullyConnected (owl-base.Owl_neural_generic.Make.Graph.Neuron.FullyConnected)

Module Neuron.FullyConnected

type neuron_typ = Make_Embedded(A).Neuron.FullyConnected.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable init_typ : Init.typ;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/GRU/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/GRU/index.html deleted file mode 100644 index ae0b7de82..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/GRU/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GRU (owl-base.Owl_neural_generic.Make.Graph.Neuron.GRU)

Module Neuron.GRU

type neuron_typ = Make_Embedded(A).Neuron.GRU.neuron_typ = {
  1. mutable wxz : Optimise.Algodiff.t;
  2. mutable whz : Optimise.Algodiff.t;
  3. mutable wxr : Optimise.Algodiff.t;
  4. mutable whr : Optimise.Algodiff.t;
  5. mutable wxh : Optimise.Algodiff.t;
  6. mutable whh : Optimise.Algodiff.t;
  7. mutable bz : Optimise.Algodiff.t;
  8. mutable br : Optimise.Algodiff.t;
  9. mutable bh : Optimise.Algodiff.t;
  10. mutable h : Optimise.Algodiff.t;
  11. mutable init_typ : Init.typ;
  12. mutable in_shape : int array;
  13. mutable out_shape : int array;
}
val create : ?time_steps:int -> ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/GaussianDropout/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/GaussianDropout/index.html deleted file mode 100644 index a95651516..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/GaussianDropout/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GaussianDropout (owl-base.Owl_neural_generic.Make.Graph.Neuron.GaussianDropout)

Module Neuron.GaussianDropout

type neuron_typ = Make_Embedded(A).Neuron.GaussianDropout.neuron_typ = {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : float -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/GaussianNoise/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/GaussianNoise/index.html deleted file mode 100644 index 6ed0c1712..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/GaussianNoise/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GaussianNoise (owl-base.Owl_neural_generic.Make.Graph.Neuron.GaussianNoise)

Module Neuron.GaussianNoise

type neuron_typ = Make_Embedded(A).Neuron.GaussianNoise.neuron_typ = {
  1. mutable sigma : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : float -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/GlobalAvgPool1D/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/GlobalAvgPool1D/index.html deleted file mode 100644 index 5645e627c..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/GlobalAvgPool1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GlobalAvgPool1D (owl-base.Owl_neural_generic.Make.Graph.Neuron.GlobalAvgPool1D)

Module Neuron.GlobalAvgPool1D

type neuron_typ = Make_Embedded(A).Neuron.GlobalAvgPool1D.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/GlobalAvgPool2D/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/GlobalAvgPool2D/index.html deleted file mode 100644 index 4a437669e..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/GlobalAvgPool2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GlobalAvgPool2D (owl-base.Owl_neural_generic.Make.Graph.Neuron.GlobalAvgPool2D)

Module Neuron.GlobalAvgPool2D

type neuron_typ = Make_Embedded(A).Neuron.GlobalAvgPool2D.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/GlobalMaxPool1D/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/GlobalMaxPool1D/index.html deleted file mode 100644 index 15265e6b4..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/GlobalMaxPool1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GlobalMaxPool1D (owl-base.Owl_neural_generic.Make.Graph.Neuron.GlobalMaxPool1D)

Module Neuron.GlobalMaxPool1D

type neuron_typ = Make_Embedded(A).Neuron.GlobalMaxPool1D.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/GlobalMaxPool2D/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/GlobalMaxPool2D/index.html deleted file mode 100644 index 64936f970..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/GlobalMaxPool2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GlobalMaxPool2D (owl-base.Owl_neural_generic.Make.Graph.Neuron.GlobalMaxPool2D)

Module Neuron.GlobalMaxPool2D

type neuron_typ = Make_Embedded(A).Neuron.GlobalMaxPool2D.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Init/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Init/index.html deleted file mode 100644 index eb284a5eb..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Init/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Init (owl-base.Owl_neural_generic.Make.Graph.Neuron.Init)

Module Neuron.Init

type typ = Make_Embedded(A).Neuron.Init.typ =
  1. | Uniform of float * float
  2. | Gaussian of float * float
  3. | Standard
  4. | Tanh
  5. | GlorotNormal
  6. | GlorotUniform
  7. | LecunNormal
  8. | HeNormal
  9. | Custom of int array -> Optimise.Algodiff.t
val calc_fans : int array -> float * float
val run : typ -> int array -> Optimise.Algodiff.t -> Optimise.Algodiff.t
val to_string : typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Input/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Input/index.html deleted file mode 100644 index ea826f586..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Input/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Input (owl-base.Owl_neural_generic.Make.Graph.Neuron.Input)

Module Neuron.Input

type neuron_typ = Make_Embedded(A).Neuron.Input.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : int array -> neuron_typ
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/LSTM/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/LSTM/index.html deleted file mode 100644 index ff1d11140..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/LSTM/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -LSTM (owl-base.Owl_neural_generic.Make.Graph.Neuron.LSTM)

Module Neuron.LSTM

type neuron_typ = Make_Embedded(A).Neuron.LSTM.neuron_typ = {
  1. mutable wxi : Optimise.Algodiff.t;
  2. mutable whi : Optimise.Algodiff.t;
  3. mutable wxc : Optimise.Algodiff.t;
  4. mutable whc : Optimise.Algodiff.t;
  5. mutable wxf : Optimise.Algodiff.t;
  6. mutable whf : Optimise.Algodiff.t;
  7. mutable wxo : Optimise.Algodiff.t;
  8. mutable who : Optimise.Algodiff.t;
  9. mutable bi : Optimise.Algodiff.t;
  10. mutable bc : Optimise.Algodiff.t;
  11. mutable bf : Optimise.Algodiff.t;
  12. mutable bo : Optimise.Algodiff.t;
  13. mutable c : Optimise.Algodiff.t;
  14. mutable h : Optimise.Algodiff.t;
  15. mutable init_typ : Init.typ;
  16. mutable in_shape : int array;
  17. mutable out_shape : int array;
}
val create : ?time_steps:int -> ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Lambda/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Lambda/index.html deleted file mode 100644 index 596bb9b62..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Lambda/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Lambda (owl-base.Owl_neural_generic.Make.Graph.Neuron.Lambda)

Module Neuron.Lambda

type neuron_typ = Make_Embedded(A).Neuron.Lambda.neuron_typ = {
  1. mutable lambda : Optimise.Algodiff.t -> Optimise.Algodiff.t;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : ?out_shape:int array -> (Optimise.Algodiff.t -> Optimise.Algodiff.t) -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/LambdaArray/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/LambdaArray/index.html deleted file mode 100644 index 8b17d5cd7..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/LambdaArray/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -LambdaArray (owl-base.Owl_neural_generic.Make.Graph.Neuron.LambdaArray)

Module Neuron.LambdaArray

type neuron_typ = Make_Embedded(A).Neuron.LambdaArray.neuron_typ = {
  1. mutable lambda : Optimise.Algodiff.t array -> Optimise.Algodiff.t;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : int array -> (Optimise.Algodiff.t array -> Optimise.Algodiff.t) -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Linear/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Linear/index.html deleted file mode 100644 index 3d2fd4105..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Linear/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Linear (owl-base.Owl_neural_generic.Make.Graph.Neuron.Linear)

Module Neuron.Linear

type neuron_typ = Make_Embedded(A).Neuron.Linear.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable init_typ : Init.typ;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/LinearNoBias/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/LinearNoBias/index.html deleted file mode 100644 index af15bce2e..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/LinearNoBias/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -LinearNoBias (owl-base.Owl_neural_generic.Make.Graph.Neuron.LinearNoBias)

Module Neuron.LinearNoBias

type neuron_typ = Make_Embedded(A).Neuron.LinearNoBias.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable init_typ : Init.typ;
  3. mutable in_shape : int array;
  4. mutable out_shape : int array;
}
val create : ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Masking/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Masking/index.html deleted file mode 100644 index 0c7aca218..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Masking/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Masking (owl-base.Owl_neural_generic.Make.Graph.Neuron.Masking)

Module Neuron.Masking

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Max/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Max/index.html deleted file mode 100644 index 2dd3b898b..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Max/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Max (owl-base.Owl_neural_generic.Make.Graph.Neuron.Max)

Module Neuron.Max

type neuron_typ = Make_Embedded(A).Neuron.Max.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/MaxPool1D/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/MaxPool1D/index.html deleted file mode 100644 index ca8b3bbd4..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/MaxPool1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -MaxPool1D (owl-base.Owl_neural_generic.Make.Graph.Neuron.MaxPool1D)

Module Neuron.MaxPool1D

type neuron_typ = Make_Embedded(A).Neuron.MaxPool1D.neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : Owl_types.padding -> int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/MaxPool2D/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/MaxPool2D/index.html deleted file mode 100644 index 04a65ee3b..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/MaxPool2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -MaxPool2D (owl-base.Owl_neural_generic.Make.Graph.Neuron.MaxPool2D)

Module Neuron.MaxPool2D

type neuron_typ = Make_Embedded(A).Neuron.MaxPool2D.neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : Owl_types.padding -> int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Mul/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Mul/index.html deleted file mode 100644 index d90ce5ce7..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Mul/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mul (owl-base.Owl_neural_generic.Make.Graph.Neuron.Mul)

Module Neuron.Mul

type neuron_typ = Make_Embedded(A).Neuron.Mul.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Normalisation/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Normalisation/index.html deleted file mode 100644 index 470e83dfe..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Normalisation/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -Normalisation (owl-base.Owl_neural_generic.Make.Graph.Neuron.Normalisation)

Module Neuron.Normalisation

type neuron_typ = Make_Embedded(A).Neuron.Normalisation.neuron_typ = {
  1. mutable axis : int;
  2. mutable beta : Optimise.Algodiff.t;
  3. mutable gamma : Optimise.Algodiff.t;
  4. mutable mu : Optimise.Algodiff.t;
  5. mutable var : Optimise.Algodiff.t;
  6. mutable decay : Optimise.Algodiff.t;
  7. mutable training : bool;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}
val create : ?training:bool -> ?decay:float -> ?mu:Optimise.Algodiff.A.arr -> ?var:Optimise.Algodiff.A.arr -> int -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val load_weights : neuron_typ -> Optimise.Algodiff.t array -> unit
val save_weights : neuron_typ -> Optimise.Algodiff.t array
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/A/Linalg/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/A/Linalg/index.html deleted file mode 100644 index 5016d63c2..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Algodiff.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
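As a quick orientation, the following hedged sketch shows one way this Linalg signature can be exercised. The alias `A` (a concrete instantiation of the enclosing ndarray interface) and the sample values are assumptions for illustration, not part of this documentation.

(* Minimal sketch: solve a * x = b, then sanity-check the result with dot.
   Assumes A implements the A interface documented on the parent page. *)
let () =
  let f = A.float_to_elt in
  let a = A.of_arrays [| [| f 4.; f 1. |]; [| f 1.; f 3. |] |] in  (* coefficient matrix *)
  let b = A.of_arrays [| [| f 1. |]; [| f 2. |] |] in              (* right-hand side *)
  let x = A.Linalg.linsolve a b in                                 (* solve a * x = b *)
  A.print (A.dot a x)                                              (* should reproduce b *)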
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/A/Mat/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/A/Mat/index.html deleted file mode 100644 index 6fc6e1dec..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Algodiff.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/A/Scalar/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/A/Scalar/index.html deleted file mode 100644 index 5727d38b1..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Algodiff.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
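A small hedged sketch of the scalar helpers, assuming `A` is a concrete instantiation of the parent interface; values of the abstract `elt` type are created and read back with the `float_to_elt` / `elt_to_float` functions documented on the parent A page.

(* sigmoid of 0 is 0.5 *)
let () =
  let x = A.float_to_elt 0.0 in
  let y = A.Scalar.sigmoid x in
  Printf.printf "sigmoid 0 = %f\n" (A.elt_to_float y)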
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/A/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/A/index.html deleted file mode 100644 index 2779035d1..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Algodiff.A)

Module Algodiff.A

val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
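For orientation, here is a hedged sketch of the plain ndarray layer that Algodiff is built on, assuming `A` is bound to a concrete instantiation of this signature (the alias and sample shapes are illustrative only).

(* Create, reshape and multiply arrays, then reduce to a scalar. *)
let () =
  let x = A.gaussian [| 3; 4 |] in        (* 3x4 array of random values *)
  let y = A.reshape x [| 4; 3 |] in       (* same data, new shape *)
  let z = A.dot x y in                    (* 3x3 matrix product *)
  Printf.printf "sum = %f\n" (A.elt_to_float (A.sum' z));
  Printf.printf "shape = %s\n"
    (String.concat "x" (Array.to_list (Array.map string_of_int (A.shape z))))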
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Arr/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Arr/index.html deleted file mode 100644 index 252479483..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Arr/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Arr (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Algodiff.Arr)

Module Algodiff.Arr

val empty : int array -> t
val zeros : int array -> t
val ones : int array -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int array -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int array -> t
val shape : t -> int array
val numel : t -> int
val reset : t -> unit
val reshape : t -> int array -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
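A hedged sketch of this convenience submodule, assuming `AD` aliases an instantiation of the surrounding Algodiff module; it builds array-valued Algodiff values directly.

(* 2x3 times 3x4 ones gives a 2x4 result whose entries are all 3. *)
let () =
  let a = AD.Arr.ones [| 2; 3 |] in
  let b = AD.Arr.ones [| 3; 4 |] in
  let c = AD.Arr.dot a b in
  assert (AD.Arr.shape c = [| 2; 4 |])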
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Builder/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Builder/index.html deleted file mode 100644 index 140bc7d19..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Builder/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Builder (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Algodiff.Builder)

Module Algodiff.Builder

module type Siso = sig ... end
val build_siso : (module Siso) -> t -> t
module type Sipo = sig ... end
val build_sipo : (module Sipo) -> t -> t * t
module type Sito = sig ... end
val build_sito : (module Sito) -> t -> t * t * t
module type Siao = sig ... end
val build_siao : (module Siao) -> t -> t array
module type Piso = sig ... end
val build_piso : (module Piso) -> t -> t -> t
module type Aiso = sig ... end
val build_aiso : (module Aiso) -> t array -> t
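The sketch below is a heavily hedged illustration of registering a custom single-input single-output operator through build_siso. The alias `AD` (an instantiation of the enclosing Algodiff module) and, in particular, the roles assumed for the df/dr arguments (output primal, input primal, tangent; and node, output primal, adjoint reference) follow the pattern of Owl's built-in operators and are assumptions here, not something this page specifies.

(* Custom operator f(x) = 2x + 1. Its derivative is the constant 2, so both
   the forward tangent and the reverse adjoint are simply scaled by 2. *)
let scale_shift =
  AD.Builder.build_siso
    (module struct
       let label = "scale_shift"
       let ff_f x =
         AD.F (AD.A.Scalar.(add (mul x (AD.A.float_to_elt 2.)) (AD.A.float_to_elt 1.)))
       let ff_arr x =
         AD.Arr (AD.A.(add_scalar (mul_scalar x (float_to_elt 2.)) (float_to_elt 1.)))
       let df _cp _ap at = AD.Maths.(at * AD.pack_flt 2.)
       let dr _a _cp ca = AD.Maths.(!ca * AD.pack_flt 2.)
     end : AD.Builder.Siso)

let () =
  Printf.printf "f 3 = %f\n" (AD.unpack_flt (scale_shift (AD.pack_flt 3.)))  (* 7. *)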
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Aiso/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Aiso/index.html deleted file mode 100644 index 249acb783..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Algodiff.Builder.Aiso)

Module type Builder.Aiso

val label : string
val ff : t array -> t
val df : int list -> t -> t array -> t array -> t
val dr : int list -> t array -> t -> t Stdlib.ref -> t list
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Piso/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Piso/index.html deleted file mode 100644 index b0a6638cf..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Algodiff.Builder.Piso)

Module type Builder.Piso

val label : string
val ff_aa : A.elt -> A.elt -> t
val ff_ab : A.elt -> A.arr -> t
val ff_ba : A.arr -> A.elt -> t
val ff_bb : A.arr -> A.arr -> t
val df_da : t -> t -> t -> t -> t
val df_db : t -> t -> t -> t -> t
val df_dab : t -> t -> t -> t -> t -> t
val dr_ab : t -> t -> t -> t Stdlib.ref -> t * t
val dr_a : t -> t -> t -> t Stdlib.ref -> t
val dr_b : t -> t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siao/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siao/index.html deleted file mode 100644 index 22e250b10..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siao/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siao (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Algodiff.Builder.Siao)

Module type Builder.Siao

val label : string
val ff_f : A.elt -> t array
val ff_arr : A.arr -> t array
val df : t array -> t -> t -> t array
val dr : t -> t -> t Stdlib.ref array -> t Stdlib.ref array -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sipo/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sipo/index.html deleted file mode 100644 index bde567169..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Algodiff.Builder.Sipo)

Module type Builder.Sipo

val label : string
val ff_f : A.elt -> t * t
val ff_arr : A.arr -> t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siso/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siso/index.html deleted file mode 100644 index 7c7d28a7b..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Algodiff.Builder.Siso)

Module type Builder.Siso

val label : string
val ff_f : A.elt -> t
val ff_arr : A.arr -> t
val df : t -> t -> t -> t
val dr : t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sito/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sito/index.html deleted file mode 100644 index ff0477539..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Algodiff.Builder.Sito)

Module type Builder.Sito

val label : string
val ff_f : A.elt -> t * t * t
val ff_arr : A.arr -> t * t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Linalg/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Linalg/index.html deleted file mode 100644 index f24afee8f..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Algodiff.Linalg)

Module Algodiff.Linalg

val inv : t -> t
val logdet : t -> t
val chol : ?upper:bool -> t -> t
val qr : t -> t * t
val lq : t -> t * t
val svd : ?thin:bool -> t -> t * t * t
val sylvester : t -> t -> t -> t
val lyapunov : t -> t -> t
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> t -> t -> t
val (/@) : t -> t -> t
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> t -> t -> t
val care : ?diag_r:bool -> t -> t -> t -> t -> t
val dare : ?diag_r:bool -> t -> t -> t -> t -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Mat/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Mat/index.html deleted file mode 100644 index c54e72778..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Algodiff.Mat)

Module Algodiff.Mat

val empty : int -> int -> t
val zeros : int -> int -> t
val eye : int -> t
val ones : int -> int -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int -> int -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int -> int -> t
val shape : t -> int * int
val numel : t -> int
val row_num : t -> int
val col_num : t -> int
val reset : t -> unit
val reshape : int -> int -> t -> t
val get : t -> int -> int -> t
val set : t -> int -> int -> t -> t
val row : t -> int -> t
val mean : t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
val map_by_row : (t -> t) -> t -> t
val of_arrays : A.elt array array -> t
val init_2d : int -> int -> (int -> int -> t) -> t
val print : t -> unit
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Maths/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Maths/index.html deleted file mode 100644 index 69d565f89..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/Maths/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Maths (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Algodiff.Maths)

Module Algodiff.Maths

val (+) : t -> t -> t
val (-) : t -> t -> t
val (*) : t -> t -> t
val (/) : t -> t -> t
val (*@) : t -> t -> t
val (**) : t -> t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val kron : t -> t -> t
val dot : t -> t -> t
val pow : t -> t -> t
val atan2 : t -> t -> t
val min2 : t -> t -> t
val max2 : t -> t -> t
val cross_entropy : t -> t -> t
val inv : t -> t
val neg : t -> t
val abs : t -> t
val signum : t -> t
val floor : t -> t
val ceil : t -> t
val round : t -> t
val sqr : t -> t
val sqrt : t -> t
val log : t -> t
val log2 : t -> t
val log10 : t -> t
val exp : t -> t
val sin : t -> t
val cos : t -> t
val tan : t -> t
val sinh : t -> t
val cosh : t -> t
val tanh : t -> t
val asin : t -> t
val acos : t -> t
val atan : t -> t
val asinh : t -> t
val acosh : t -> t
val atanh : t -> t
val sum' : t -> t
val log_sum_exp' : t -> t
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> t -> t
val sum : ?axis:int -> ?keep_dims:bool -> t -> t
val sum_reduce : ?axis:int array -> t -> t
val mean : t -> t
val transpose : ?axis:int array -> t -> t
val swap : int -> int -> t -> t
val l1norm' : t -> t
val l2norm' : t -> t
val l2norm_sqr' : t -> t
val sigmoid : t -> t
val relu : t -> t
val dawsn : t -> t
val softplus : t -> t
val softsign : t -> t
val softmax : ?axis:int -> t -> t
val reshape : t -> int array -> t
val flatten : t -> t
val get_item : t -> int -> int -> t
val get_row : t -> int -> t
val concat : axis:int -> t -> t -> t
val split : axis:int -> int array -> t -> t array
val of_arrays : t array array -> t
val to_arrays : t -> t array array
val concatenate : axis:int -> t array -> t
val stack : axis:int -> t array -> t
val get_slice : int list list -> t -> t
val set_slice : int list list -> t -> t -> t
val get_fancy : Owl_types.index list -> t -> t
val set_fancy : Owl_types.index list -> t -> t -> t
val diag : ?k:int -> t -> t
val diagm : ?k:int -> t -> t
val trace : t -> t
val triu : ?k:int -> t -> t
val tril : ?k:int -> t -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/NN/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/NN/index.html deleted file mode 100644 index fd7e18738..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/NN/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -NN (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Algodiff.NN)

Module Algodiff.NN

val dropout : ?rate:float -> t -> t
val conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val dilated_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val dilated_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val dilated_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val transpose_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val transpose_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val transpose_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val max_pool1d : Owl_types.padding -> t -> int array -> int array -> t
val max_pool2d : Owl_types.padding -> t -> int array -> int array -> t
val max_pool3d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool1d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool2d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool3d : Owl_types.padding -> t -> int array -> int array -> t
val upsampling2d : t -> int array -> t
val pad : ?v:A.elt -> int list list -> t -> t
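A hedged sketch of the neural-network primitives on packed arrays. `AD` is assumed to alias an instantiation of the enclosing Algodiff module, and the `[|batch; height; width; channels|]` input layout used below is an assumption for illustration.

(* SAME-padded 2x2 max pooling with stride 2 halves the spatial dimensions. *)
let () =
  let x = AD.pack_arr (AD.A.gaussian [| 1; 28; 28; 3 |]) in
  let y = AD.NN.max_pool2d Owl_types.SAME x [| 2; 2 |] [| 2; 2 |] in
  assert (AD.shape y = [| 1; 14; 14; 3 |])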
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/index.html deleted file mode 100644 index 8bda7b926..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Algodiff/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Algodiff (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Algodiff)

Module Optimise.Algodiff

module A : sig ... end
type t = Make_Embedded(A).Neuron.Optimise.Algodiff.t =
  1. | F of A.elt
  2. | Arr of A.arr
  3. | DF of t * t * int
  4. | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
val tag : unit -> int
val primal : t -> t
val primal' : t -> t
val zero : t -> t
val reset_zero : t -> t
val tangent : t -> t
val adjref : t -> t Stdlib.ref
val adjval : t -> t
val shape : t -> int array
val is_float : t -> bool
val is_arr : t -> bool
val row_num : t -> int
val col_num : t -> int
val numel : t -> int
val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t
val clip_by_l2norm : A.elt -> t -> t
val copy_primal' : t -> t
val tile : t -> int array -> t
val repeat : t -> int array -> t
val pack_elt : A.elt -> t
val unpack_elt : t -> A.elt
val pack_flt : float -> t
val _f : float -> t
val unpack_flt : t -> float
val pack_arr : A.arr -> t
val unpack_arr : t -> A.arr
val deep_info : t -> string
val type_info : t -> string
val error_binop : string -> t -> t -> 'a
val error_uniop : string -> t -> 'a
val make_forward : t -> t -> int -> t
val make_reverse : t -> int -> t
val reverse_prop : t -> t -> unit
val diff : (t -> t) -> t -> t
val diff' : (t -> t) -> t -> t * t
val grad : (t -> t) -> t -> t
val grad' : (t -> t) -> t -> t * t
val jacobian : (t -> t) -> t -> t
val jacobian' : (t -> t) -> t -> t * t
val jacobianv : (t -> t) -> t -> t -> t
val jacobianv' : (t -> t) -> t -> t -> t * t
val jacobianTv : (t -> t) -> t -> t -> t
val jacobianTv' : (t -> t) -> t -> t -> t * t
val hessian : (t -> t) -> t -> t
val hessian' : (t -> t) -> t -> t * t
val hessianv : (t -> t) -> t -> t -> t
val hessianv' : (t -> t) -> t -> t -> t * t
val laplacian : (t -> t) -> t -> t
val laplacian' : (t -> t) -> t -> t * t
val gradhessian : (t -> t) -> t -> t * t
val gradhessian' : (t -> t) -> t -> t * t * t
val gradhessianv : (t -> t) -> t -> t -> t * t
val gradhessianv' : (t -> t) -> t -> t -> t * t * t
module Builder : sig ... end
module Maths : sig ... end
module Linalg : sig ... end
module NN : sig ... end
module Mat : sig ... end
module Arr : sig ... end
val to_trace : t list -> string
val to_dot : t list -> string
val pp_num : Stdlib.Format.formatter -> t -> unit
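To see how the pieces listed above fit together, here is a hedged sketch of the high-level differentiation API, with `AD` assumed to alias an instantiation of this Algodiff module.

(* f x = sin x + x^2, so f' x = cos x + 2x. *)
let () =
  let f x = AD.Maths.(sin x + sqr x) in
  let dfdx = AD.diff f (AD.pack_flt 1.0) in
  Printf.printf "f'(1)  = %f\n" (AD.unpack_flt dfdx);
  (* higher-order derivatives come from composing diff *)
  let d2 = AD.diff (AD.diff f) (AD.pack_flt 1.0) in
  Printf.printf "f''(1) = %f\n" (AD.unpack_flt d2)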
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Batch/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Batch/index.html deleted file mode 100644 index c5ea7e8a6..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Batch/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Batch (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Batch)

Module Optimise.Batch

type typ = Make_Embedded(A).Neuron.Optimise.Batch.typ =
  1. | Full
  2. | Mini of int
  3. | Sample of int
  4. | Stochastic
val run : typ -> Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t
val batches : typ -> Algodiff.t -> int
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Checkpoint/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Checkpoint/index.html deleted file mode 100644 index df78227b5..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Checkpoint/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Checkpoint (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Checkpoint)

Module Optimise.Checkpoint

type state = Make_Embedded(A).Neuron.Optimise.Checkpoint.state = {
  1. mutable current_batch : int;
  2. mutable batches_per_epoch : int;
  3. mutable epochs : float;
  4. mutable batches : int;
  5. mutable loss : Algodiff.t array;
  6. mutable start_at : float;
  7. mutable stop : bool;
  8. mutable gs : Algodiff.t array array;
  9. mutable ps : Algodiff.t array array;
  10. mutable us : Algodiff.t array array;
  11. mutable ch : Algodiff.t array array array;
}
type typ = Make_Embedded(A).Neuron.Optimise.Checkpoint.typ =
  1. | Batch of int
  2. | Epoch of float
  3. | Custom of (state -> unit)
  4. | None
val init_state : int -> float -> state
val default_checkpoint_fun : (string -> 'a) -> 'a
val print_state_info : state -> unit
val print_summary : state -> unit
val run : typ -> (string -> unit) -> int -> Algodiff.t -> state -> unit
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Clipping/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Clipping/index.html deleted file mode 100644 index cc0784df2..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Clipping/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Clipping (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Clipping)

Module Optimise.Clipping

type typ = Make_Embedded(A).Neuron.Optimise.Clipping.typ =
  1. | L2norm of float
  2. | Value of float * float
  3. | None
val run : typ -> Algodiff.t -> Algodiff.t
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Gradient/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Gradient/index.html deleted file mode 100644 index e74dd268c..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Gradient/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -Gradient (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Gradient)

Module Optimise.Gradient

type typ = Make_Embedded(A).Neuron.Optimise.Gradient.typ =
  1. | GD
  2. | CG
  3. | CD
  4. | NonlinearCG
  5. | DaiYuanCG
  6. | NewtonCG
  7. | Newton
val run : typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Learning_Rate/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Learning_Rate/index.html deleted file mode 100644 index 38f7fbbbd..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Learning_Rate/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Learning_Rate (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Learning_Rate)

Module Optimise.Learning_Rate

type typ = Make_Embedded(A).Neuron.Optimise.Learning_Rate.typ =
  1. | Adagrad of float
  2. | Const of float
  3. | Decay of float * float
  4. | Exp_decay of float * float
  5. | RMSprop of float * float
  6. | Adam of float * float * float
  7. | Schedule of float array
val run : typ -> int -> Algodiff.t -> Algodiff.t array -> Algodiff.t
val default : typ -> typ
val update_ch : typ -> Algodiff.t -> Algodiff.t array -> Algodiff.t array
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Loss/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Loss/index.html deleted file mode 100644 index 86fd5e272..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Loss/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Loss (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Loss)

Module Optimise.Loss

type typ = Make_Embedded(A).Neuron.Optimise.Loss.typ =
  1. | Hinge
  2. | L1norm
  3. | L2norm
  4. | Quadratic
  5. | Cross_entropy
  6. | Custom of (Algodiff.t -> Algodiff.t -> Algodiff.t)
val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
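A hedged sketch of evaluating a loss, assuming `Loss` and `AD` alias the Loss and Algodiff submodules of an instantiated Optimise module; the scalar values are illustrative.

(* Quadratic loss between a target and a prediction, both packed as scalars. *)
let () =
  let y  = AD.pack_flt 1.0 in
  let y' = AD.pack_flt 0.8 in
  let l  = Loss.run Loss.Quadratic y y' in
  Printf.printf "loss = %f\n" (AD.unpack_flt l)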
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Momentum/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Momentum/index.html deleted file mode 100644 index ae4712c10..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Momentum/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Momentum (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Momentum)

Module Optimise.Momentum

type typ = Make_Embedded(A).Neuron.Optimise.Momentum.typ =
  1. | Standard of float
  2. | Nesterov of float
  3. | None
val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Params/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Params/index.html deleted file mode 100644 index 733049cd8..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Params/index.html +++ /dev/null @@ -1,14 +0,0 @@ - -Params (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Params)

Module Optimise.Params

type typ = Make_Embedded(A).Neuron.Optimise.Params.typ = {
  1. mutable epochs : float;
  2. mutable batch : Batch.typ;
  3. mutable gradient : Gradient.typ;
  4. mutable loss : Loss.typ;
  5. mutable learning_rate : Learning_Rate.typ;
  6. mutable regularisation : Regularisation.typ;
  7. mutable momentum : Momentum.typ;
  8. mutable clipping : Clipping.typ;
  9. mutable stopping : Stopping.typ;
  10. mutable checkpoint : Checkpoint.typ;
  11. mutable verbosity : bool;
}
val default : unit -> typ
val config : ?batch:Batch.typ -> ?gradient:Gradient.typ -> ?loss:Loss.typ -> ?learning_rate:Learning_Rate.typ -> ?regularisation:Regularisation.typ -> ?momentum:Momentum.typ -> ?clipping:Clipping.typ -> ?stopping:Stopping.typ -> ?checkpoint:Checkpoint.typ -> ?verbosity:bool -> float -> typ
val to_string : typ -> string
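A hedged sketch of assembling an optimisation configuration with config, assuming the submodule names alias those of an instantiated Optimise module; the hyper-parameter values are illustrative only, and the trailing float is taken to be the number of epochs (matching the epochs field above).

let params =
  Params.config
    ~batch:(Batch.Mini 128)
    ~learning_rate:(Learning_Rate.Adam (0.001, 0.9, 0.999))
    ~loss:Loss.Cross_entropy
    ~gradient:Gradient.GD
    ~momentum:(Momentum.Standard 0.9)
    ~stopping:(Stopping.Const 1e-6)
    ~verbosity:true
    10.                                 (* epochs, as a float *)

let () = print_endline (Params.to_string params)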
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Regularisation/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Regularisation/index.html deleted file mode 100644 index a67c516a9..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Regularisation/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Regularisation (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Regularisation)

Module Optimise.Regularisation

type typ = Make_Embedded(A).Neuron.Optimise.Regularisation.typ =
  1. | L1norm of float
  2. | L2norm of float
  3. | Elastic_net of float * float
  4. | None
val run : typ -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Stopping/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Stopping/index.html deleted file mode 100644 index c219f1bb7..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Stopping/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Stopping (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Stopping)

Module Optimise.Stopping

type typ = Make_Embedded(A).Neuron.Optimise.Stopping.typ =
  1. | Const of float
  2. | Early of int * int
  3. | None
val run : typ -> float -> bool
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Utils/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Utils/index.html deleted file mode 100644 index 0e214a85d..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/Utils/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Utils (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise.Utils)

Module Optimise.Utils

val sample_num : Algodiff.t -> int
val draw_samples : Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t
val get_chunk : Algodiff.t -> Algodiff.t -> int -> int -> Algodiff.t * Algodiff.t
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/index.html deleted file mode 100644 index 9b1ce3b1e..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Optimise/index.html +++ /dev/null @@ -1,31 +0,0 @@ - -Optimise (owl-base.Owl_neural_generic.Make.Graph.Neuron.Optimise)

Module Neuron.Optimise

module Algodiff : sig ... end
module Utils : sig ... end
module Learning_Rate : sig ... end
module Batch : sig ... end
module Loss : sig ... end
module Gradient : sig ... end
module Momentum : sig ... end
module Regularisation : sig ... end
module Clipping : sig ... end
module Stopping : sig ... end
module Checkpoint : sig ... end
module Params : sig ... end
val minimise_weight : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Checkpoint.state * Algodiff.t
val minimise_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t * Algodiff.t array array) -> (Algodiff.t -> Algodiff.t array array * Algodiff.t array array) -> (Algodiff.t array array -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state
val minimise_fun : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Checkpoint.state * Algodiff.t
val minimise_compiled_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> (unit -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state
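The sketch below, hedged throughout, shows minimise_fun on a simple scalar objective. `AD` is assumed to alias Optimise.Algodiff of an instantiated Optimise module, the hyper-parameters are illustrative, and the second component of the result is read here as the optimised input (an assumption about its meaning).

(* f x = (x - 3)^2, minimised at x = 3. *)
let () =
  let f x = AD.Maths.(sqr (x - AD.pack_flt 3.)) in
  let params =
    Params.config
      ~learning_rate:(Learning_Rate.Const 0.1)
      ~gradient:Gradient.GD
      100.
  in
  let _state, x_star = minimise_fun params f (AD.pack_flt 0.) in
  Printf.printf "reported minimiser: %f\n" (AD.unpack_flt x_star)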
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Padding1D/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Padding1D/index.html deleted file mode 100644 index 054a3e60f..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Padding1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Padding1D (owl-base.Owl_neural_generic.Make.Graph.Neuron.Padding1D)

Module Neuron.Padding1D

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Padding2D/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Padding2D/index.html deleted file mode 100644 index 4f2a285f3..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Padding2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Padding2D (owl-base.Owl_neural_generic.Make.Graph.Neuron.Padding2D)

Module Neuron.Padding2D

type neuron_typ = Make_Embedded(A).Neuron.Padding2D.neuron_typ = {
  1. mutable padding : int array array;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : int array array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Padding3D/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Padding3D/index.html deleted file mode 100644 index 18668982b..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Padding3D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Padding3D (owl-base.Owl_neural_generic.Make.Graph.Neuron.Padding3D)

Module Neuron.Padding3D

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Recurrent/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Recurrent/index.html deleted file mode 100644 index 95f068ad3..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Recurrent/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -Recurrent (owl-base.Owl_neural_generic.Make.Graph.Neuron.Recurrent)

Module Neuron.Recurrent

type neuron_typ = Make_Embedded(A).Neuron.Recurrent.neuron_typ = {
  1. mutable whh : Optimise.Algodiff.t;
  2. mutable wxh : Optimise.Algodiff.t;
  3. mutable why : Optimise.Algodiff.t;
  4. mutable bh : Optimise.Algodiff.t;
  5. mutable by : Optimise.Algodiff.t;
  6. mutable h : Optimise.Algodiff.t;
  7. mutable hiddens : int;
  8. mutable act : Activation.typ;
  9. mutable init_typ : Init.typ;
  10. mutable in_shape : int array;
  11. mutable out_shape : int array;
}
val create : ?time_steps:int -> ?inputs:int -> int -> int -> Activation.typ -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Reshape/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Reshape/index.html deleted file mode 100644 index 81a0d866c..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Reshape/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Reshape (owl-base.Owl_neural_generic.Make.Graph.Neuron.Reshape)

Module Neuron.Reshape

type neuron_typ = Make_Embedded(A).Neuron.Reshape.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : ?inputs:int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Slice/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/Slice/index.html deleted file mode 100644 index 80d2df3ef..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/Slice/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Slice (owl-base.Owl_neural_generic.Make.Graph.Neuron.Slice)

Module Neuron.Slice

type neuron_typ = Make_Embedded(A).Neuron.Slice.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
  3. mutable slice : int list list;
}
val create : int list list -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/TransposeConv1D/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/TransposeConv1D/index.html deleted file mode 100644 index 14766ef7b..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/TransposeConv1D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -TransposeConv1D (owl-base.Owl_neural_generic.Make.Graph.Neuron.TransposeConv1D)

Module Neuron.TransposeConv1D

type neuron_typ = Make_Embedded(A).Neuron.TransposeConv1D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/TransposeConv2D/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/TransposeConv2D/index.html deleted file mode 100644 index ee8f08baf..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/TransposeConv2D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -TransposeConv2D (owl-base.Owl_neural_generic.Make.Graph.Neuron.TransposeConv2D)

Module Neuron.TransposeConv2D

type neuron_typ = Make_Embedded(A).Neuron.TransposeConv2D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/TransposeConv3D/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/TransposeConv3D/index.html deleted file mode 100644 index 399845fa6..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/TransposeConv3D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -TransposeConv3D (owl-base.Owl_neural_generic.Make.Graph.Neuron.TransposeConv3D)

Module Neuron.TransposeConv3D

type neuron_typ = Make_Embedded(A).Neuron.TransposeConv3D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/UpSampling1D/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/UpSampling1D/index.html deleted file mode 100644 index 3be484628..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/UpSampling1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -UpSampling1D (owl-base.Owl_neural_generic.Make.Graph.Neuron.UpSampling1D)

Module Neuron.UpSampling1D

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/UpSampling2D/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/UpSampling2D/index.html deleted file mode 100644 index 87ac603f4..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/UpSampling2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -UpSampling2D (owl-base.Owl_neural_generic.Make.Graph.Neuron.UpSampling2D)

Module Neuron.UpSampling2D

type neuron_typ = Make_Embedded(A).Neuron.UpSampling2D.neuron_typ = {
  1. mutable size : int array;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/UpSampling3D/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/UpSampling3D/index.html deleted file mode 100644 index 4a5b0123a..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/UpSampling3D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -UpSampling3D (owl-base.Owl_neural_generic.Make.Graph.Neuron.UpSampling3D)

Module Neuron.UpSampling3D

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/Neuron/index.html b/owl-base/Owl_neural_generic/Make/Graph/Neuron/index.html deleted file mode 100644 index d1fdf1f88..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/Neuron/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Neuron (owl-base.Owl_neural_generic.Make.Graph.Neuron)

Module Graph.Neuron

module Optimise : sig ... end
module Init : sig ... end
module Input : sig ... end
module Activation : sig ... end
module Linear : sig ... end
module LinearNoBias : sig ... end
module Recurrent : sig ... end
module LSTM : sig ... end
module GRU : sig ... end
module Conv1D : sig ... end
module Conv2D : sig ... end
module Conv3D : sig ... end
module DilatedConv1D : sig ... end
module DilatedConv2D : sig ... end
module DilatedConv3D : sig ... end
module TransposeConv1D : sig ... end
module TransposeConv2D : sig ... end
module TransposeConv3D : sig ... end
module FullyConnected : sig ... end
module MaxPool1D : sig ... end
module MaxPool2D : sig ... end
module AvgPool1D : sig ... end
module AvgPool2D : sig ... end
module GlobalMaxPool1D : sig ... end
module GlobalMaxPool2D : sig ... end
module GlobalAvgPool1D : sig ... end
module GlobalAvgPool2D : sig ... end
module UpSampling1D : sig ... end
module UpSampling2D : sig ... end
module UpSampling3D : sig ... end
module Padding1D : sig ... end
module Padding2D : sig ... end
module Padding3D : sig ... end
module Lambda : sig ... end
module LambdaArray : sig ... end
module Dropout : sig ... end
module Reshape : sig ... end
module Flatten : sig ... end
module Slice : sig ... end
module Add : sig ... end
module Mul : sig ... end
module Dot : sig ... end
module Max : sig ... end
module Average : sig ... end
module Concatenate : sig ... end
module Normalisation : sig ... end
module GaussianNoise : sig ... end
module GaussianDropout : sig ... end
module AlphaDropout : sig ... end
module Embedding : sig ... end
module Masking : sig ... end
type neuron = Make_Embedded(A).Neuron.neuron =
  1. | Input of Input.neuron_typ
  2. | Linear of Linear.neuron_typ
  3. | LinearNoBias of LinearNoBias.neuron_typ
  4. | Embedding of Embedding.neuron_typ
  5. | LSTM of LSTM.neuron_typ
  6. | GRU of GRU.neuron_typ
  7. | Recurrent of Recurrent.neuron_typ
  8. | Conv1D of Conv1D.neuron_typ
  9. | Conv2D of Conv2D.neuron_typ
  10. | Conv3D of Conv3D.neuron_typ
  11. | DilatedConv1D of DilatedConv1D.neuron_typ
  12. | DilatedConv2D of DilatedConv2D.neuron_typ
  13. | DilatedConv3D of DilatedConv3D.neuron_typ
  14. | TransposeConv1D of TransposeConv1D.neuron_typ
  15. | TransposeConv2D of TransposeConv2D.neuron_typ
  16. | TransposeConv3D of TransposeConv3D.neuron_typ
  17. | FullyConnected of FullyConnected.neuron_typ
  18. | MaxPool1D of MaxPool1D.neuron_typ
  19. | MaxPool2D of MaxPool2D.neuron_typ
  20. | AvgPool1D of AvgPool1D.neuron_typ
  21. | AvgPool2D of AvgPool2D.neuron_typ
  22. | GlobalMaxPool1D of GlobalMaxPool1D.neuron_typ
  23. | GlobalMaxPool2D of GlobalMaxPool2D.neuron_typ
  24. | GlobalAvgPool1D of GlobalAvgPool1D.neuron_typ
  25. | GlobalAvgPool2D of GlobalAvgPool2D.neuron_typ
  26. | UpSampling2D of UpSampling2D.neuron_typ
  27. | Padding2D of Padding2D.neuron_typ
  28. | Dropout of Dropout.neuron_typ
  29. | Reshape of Reshape.neuron_typ
  30. | Flatten of Flatten.neuron_typ
  31. | Slice of Slice.neuron_typ
  32. | Lambda of Lambda.neuron_typ
  33. | LambdaArray of LambdaArray.neuron_typ
  34. | Activation of Activation.neuron_typ
  35. | GaussianNoise of GaussianNoise.neuron_typ
  36. | GaussianDropout of GaussianDropout.neuron_typ
  37. | AlphaDropout of AlphaDropout.neuron_typ
  38. | Normalisation of Normalisation.neuron_typ
  39. | Add of Add.neuron_typ
  40. | Mul of Mul.neuron_typ
  41. | Dot of Dot.neuron_typ
  42. | Max of Max.neuron_typ
  43. | Average of Average.neuron_typ
  44. | Concatenate of Concatenate.neuron_typ
val get_in_out_shape : neuron -> int array * int array
val get_in_shape : neuron -> int array
val get_out_shape : neuron -> int array
val connect : int array array -> neuron -> unit
val init : neuron -> unit
val reset : neuron -> unit
val mktag : int -> neuron -> unit
val mkpar : neuron -> Optimise.Algodiff.t array
val mkpri : neuron -> Optimise.Algodiff.t array
val mkadj : neuron -> Optimise.Algodiff.t array
val update : neuron -> Optimise.Algodiff.t array -> unit
val load_weights : neuron -> Optimise.Algodiff.t array -> unit
val save_weights : neuron -> Optimise.Algodiff.t array
val copy : neuron -> neuron
val to_string : neuron -> string
val to_name : neuron -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/Graph/index.html b/owl-base/Owl_neural_generic/Make/Graph/index.html deleted file mode 100644 index 16d5f28ab..000000000 --- a/owl-base/Owl_neural_generic/Make/Graph/index.html +++ /dev/null @@ -1,243 +0,0 @@ - -Graph (owl-base.Owl_neural_generic.Make.Graph)

Module Make.Graph

module Neuron : sig ... end
type node = Make_Embedded(A).node = {
  1. mutable name : string;
  2. mutable prev : node array;
  3. mutable next : node array;
  4. mutable neuron : Neuron.neuron;
  5. mutable output : Neuron.Optimise.Algodiff.t option;
  6. mutable network : network;
  7. mutable train : bool;
}
and network = Make_Embedded(A).network = {
  1. mutable nnid : string;
  2. mutable size : int;
  3. mutable roots : node array;
  4. mutable outputs : node array;
  5. mutable topo : node array;
}
val make_network : ?nnid:string -> int -> node array -> node array -> network
val make_node : ?name:string -> ?train:bool -> node array -> node array -> Neuron.neuron -> Neuron.Optimise.Algodiff.t option -> network -> node
val get_roots : network -> node array
val get_outputs : network -> node array
val get_node : network -> string -> node
val get_network : ?name:string -> node -> network
val outputs : ?name:string -> node array -> network
val get_network_name : network -> string
val set_network_name : network -> string -> unit
val collect_output : node array -> Neuron.Optimise.Algodiff.t array
val connect_pair : node -> node -> unit
val connect_to_parents : node array -> node -> unit
val add_node : ?act_typ:Neuron.Activation.typ -> network -> node array -> node -> node
val input_shape : network -> int array
val input_shapes : network -> int array array
val init : network -> unit
val reset : network -> unit
val mktag : int -> network -> unit
val mkpar : network -> Neuron.Optimise.Algodiff.t array array
val mkpri : network -> Neuron.Optimise.Algodiff.t array array
val mkadj : network -> Neuron.Optimise.Algodiff.t array array
val update : network -> Neuron.Optimise.Algodiff.t array array -> unit
val run_inputs : Neuron.Optimise.Algodiff.t array -> network -> Neuron.Optimise.Algodiff.t array
val forward_inputs : network -> Neuron.Optimise.Algodiff.t array -> Neuron.Optimise.Algodiff.t array * Neuron.Optimise.Algodiff.t array array
val backward : network -> Neuron.Optimise.Algodiff.t -> Neuron.Optimise.Algodiff.t array array * Neuron.Optimise.Algodiff.t array array
val copy : network -> network
val model_inputs : network -> Neuron.Optimise.Algodiff.A.arr array -> Neuron.Optimise.Algodiff.A.arr array
val input : ?name:string -> int array -> node
val inputs : ?names:string array -> int array array -> node array
val activation : ?name:string -> Neuron.Activation.typ -> node -> node
val linear : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> node -> node
val linear_nobias : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> node -> node
val embedding : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> int -> node -> node
val recurrent : ?name:string -> ?init_typ:Neuron.Init.typ -> act_typ:Neuron.Activation.typ -> int -> int -> node -> node
val lstm : ?name:string -> ?init_typ:Neuron.Init.typ -> int -> node -> node
val gru : ?name:string -> ?init_typ:Neuron.Init.typ -> int -> node -> node
val conv1d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val conv2d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val conv3d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val dilated_conv1d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> int array -> node -> node
val dilated_conv2d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> int array -> node -> node
val dilated_conv3d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> int array -> node -> node
val transpose_conv1d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val transpose_conv2d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val transpose_conv3d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val fully_connected : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> node -> node
val max_pool1d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val max_pool2d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val avg_pool1d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val avg_pool2d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val global_max_pool1d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node
val global_max_pool2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node
val global_avg_pool1d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node
val global_avg_pool2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node
val upsampling2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> int array -> node -> node
val padding2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> int array array -> node -> node
val dropout : ?name:string -> float -> node -> node
val gaussian_noise : ?name:string -> float -> node -> node
val gaussian_dropout : ?name:string -> float -> node -> node
val alpha_dropout : ?name:string -> float -> node -> node
val normalisation : ?name:string -> ?axis:int -> ?training:bool -> ?decay:float -> ?mu:Neuron.Optimise.Algodiff.A.arr -> ?var:Neuron.Optimise.Algodiff.A.arr -> node -> node
val reshape : ?name:string -> int array -> node -> node
val flatten : ?name:string -> node -> node
val slice : ?name:string -> int list list -> node -> node
val lambda : ?name:string -> ?act_typ:Neuron.Activation.typ -> ?out_shape:int array -> (Neuron.Optimise.Algodiff.t -> Neuron.Optimise.Algodiff.t) -> node -> node
val lambda_array : ?name:string -> ?act_typ:Neuron.Activation.typ -> int array -> (Neuron.Optimise.Algodiff.t array -> Neuron.Optimise.Algodiff.t) -> node array -> node
val add : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val mul : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val dot : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val max : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val average : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val concatenate : ?name:string -> ?act_typ:Neuron.Activation.typ -> int -> node array -> node
val to_string : network -> string
val pp_network : Stdlib.Format.formatter -> network -> unit
val print : network -> unit
val save : ?unsafe:bool -> network -> string -> unit
val load : string -> network
val save_weights : network -> string -> unit
val load_weights : network -> string -> unit
val make_subnetwork : ?copy:bool -> ?make_inputs:string array -> network -> string array -> network
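Usage sketch for the Graph combinators above (assumes the single-precision instantiation shipped as Owl.Neural.S; layer sizes and the file name are illustrative):
(* Build a small MLP with the layer builders listed above, then inspect and save it. *)
open Owl
open Neural.S
open Neural.S.Graph

let network =
  input [| 784 |]
  |> linear 300 ~act_typ:Activation.Relu
  |> linear 10 ~act_typ:Activation.(Softmax 1)
  |> get_network

let () =
  print network;                  (* print : network -> unit                *)
  save network "mlp.network"      (* save : ?unsafe:bool -> network -> string -> unit *)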
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/argument-1-A/Linalg/index.html b/owl-base/Owl_neural_generic/Make/argument-1-A/Linalg/index.html deleted file mode 100644 index 935b7b0c2..000000000 --- a/owl-base/Owl_neural_generic/Make/argument-1-A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_neural_generic.Make.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
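A rough sketch of what an implementation of this Linalg signature provides, shown with Owl's concrete Linalg.D module (the matrix contents are made up):
(* Solve A x = b with linsolve and print the residual, which should be close to zero. *)
open Owl

let () =
  let a = Mat.of_array [| 4.; 1.; 1.; 3. |] 2 2 in
  let b = Mat.of_array [| 1.; 2. |] 2 1 in
  let x = Linalg.D.linsolve a b in      (* linsolve : arr -> arr -> arr *)
  Mat.print Mat.(a *@ x - b)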
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/argument-1-A/Mat/index.html b/owl-base/Owl_neural_generic/Make/argument-1-A/Mat/index.html deleted file mode 100644 index 4fa24cd30..000000000 --- a/owl-base/Owl_neural_generic/Make/argument-1-A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_neural_generic.Make.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/argument-1-A/Scalar/index.html b/owl-base/Owl_neural_generic/Make/argument-1-A/Scalar/index.html deleted file mode 100644 index 933d01a87..000000000 --- a/owl-base/Owl_neural_generic/Make/argument-1-A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_neural_generic.Make.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/argument-1-A/index.html b/owl-base/Owl_neural_generic/Make/argument-1-A/index.html deleted file mode 100644 index 50cfe11b4..000000000 --- a/owl-base/Owl_neural_generic/Make/argument-1-A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl-base.Owl_neural_generic.Make.A)

Parameter Make.A

include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
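The parameter A is any ndarray module satisfying this signature; a brief sketch against Owl's Dense.Ndarray.D (aliased as Arr), with arbitrary shapes:
(* Create, reshape and reduce an ndarray through a few of the operations required above. *)
open Owl

let () =
  let x = Arr.gaussian [| 3; 4 |] in                (* gaussian : ?mu -> ?sigma -> int array -> arr *)
  let y = Arr.(reshape x [| 4; 3 |] |> sigmoid) in
  Printf.printf "sum = %g\n" (Arr.sum' y)           (* sum' : arr -> elt *)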
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make/index.html b/owl-base/Owl_neural_generic/Make/index.html deleted file mode 100644 index 689b408ae..000000000 --- a/owl-base/Owl_neural_generic/Make/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Make (owl-base.Owl_neural_generic.Make)

Module Owl_neural_generic.Make

Parameters

Signature

include sig ... end
module Graph : sig ... end
module Optimise = Graph.Neuron.Optimise
module Init = Graph.Neuron.Init
module Activation = Graph.Neuron.Activation
module Regularisation = Graph.Neuron.Optimise.Regularisation
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Activation/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Activation/index.html deleted file mode 100644 index f8ed071fd..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Activation/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Activation (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Activation)

Module Neuron.Activation

type typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Activation.typ =
  1. | Elu
  2. | Relu
  3. | Sigmoid
  4. | HardSigmoid
  5. | Softmax of int
  6. | Softplus
  7. | Softsign
  8. | Tanh
  9. | Relu6
  10. | LeakyRelu of float
  11. | TRelu of float
  12. | Custom of (Optimise.Algodiff.t -> Optimise.Algodiff.t)
  13. | None
type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Activation.neuron_typ = {
  1. mutable activation : typ;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val run_activation : Optimise.Algodiff.t -> typ -> Optimise.Algodiff.t
val copy : neuron_typ -> neuron_typ
val activation_to_string : typ -> string
val to_string : neuron_typ -> string
val to_name : unit -> string
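Sketch of how these activation constructors are consumed, via the Graph.activation builder in the Owl.Neural.S instantiation (the layer size is illustrative):
(* Attach activations explicitly to a node; Relu and Softmax are constructors of typ above. *)
open Owl
open Neural.S
open Neural.S.Graph

let head x =
  x
  |> activation Activation.Relu
  |> linear 10
  |> activation Activation.(Softmax 1)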
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Add/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Add/index.html deleted file mode 100644 index 48f1247a3..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Add/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Add (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Add)

Module Neuron.Add

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Add.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/AlphaDropout/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/AlphaDropout/index.html deleted file mode 100644 index 96b5b3ec5..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/AlphaDropout/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -AlphaDropout (owl-base.Owl_neural_generic.Make_Embedded.Neuron.AlphaDropout)

Module Neuron.AlphaDropout

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).AlphaDropout.neuron_typ = {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : float -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Average/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Average/index.html deleted file mode 100644 index 21b380a47..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Average/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Average (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Average)

Module Neuron.Average

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Average.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/AvgPool1D/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/AvgPool1D/index.html deleted file mode 100644 index 4d05a1f25..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/AvgPool1D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -AvgPool1D (owl-base.Owl_neural_generic.Make_Embedded.Neuron.AvgPool1D)

Module Neuron.AvgPool1D

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).AvgPool1D.neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : Owl_types.padding -> int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/AvgPool2D/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/AvgPool2D/index.html deleted file mode 100644 index c2dfaa6a9..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/AvgPool2D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -AvgPool2D (owl-base.Owl_neural_generic.Make_Embedded.Neuron.AvgPool2D)

Module Neuron.AvgPool2D

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).AvgPool2D.neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : Owl_types.padding -> int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Concatenate/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Concatenate/index.html deleted file mode 100644 index 6d534e97f..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Concatenate/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Concatenate (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Concatenate)

Module Neuron.Concatenate

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Concatenate.neuron_typ = {
  1. mutable axis : int;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : int -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Conv1D/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Conv1D/index.html deleted file mode 100644 index 3fd1c8ad3..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Conv1D/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -Conv1D (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Conv1D)

Module Neuron.Conv1D

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Conv1D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Conv2D/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Conv2D/index.html deleted file mode 100644 index 745df39ce..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Conv2D/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -Conv2D (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Conv2D)

Module Neuron.Conv2D

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Conv2D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
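Sketch: Graph.conv2d creates and wires this Conv2D neuron; a typical stack in the Owl.Neural.S instantiation (the 32x32 RGB input, 3x3 kernels and 32 output channels are illustrative):
(* kernel is [|width; height; in_channels; out_channels|], stride is [|1; 1|]. *)
open Owl
open Neural.S
open Neural.S.Graph

let features =
  input [| 32; 32; 3 |]
  |> conv2d [| 3; 3; 3; 32 |] [| 1; 1 |] ~act_typ:Activation.Relu
  |> max_pool2d [| 2; 2 |] [| 2; 2 |]
  |> global_avg_pool2d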
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Conv3D/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Conv3D/index.html deleted file mode 100644 index f2db93d46..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Conv3D/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -Conv3D (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Conv3D)

Module Neuron.Conv3D

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Conv3D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/DilatedConv1D/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/DilatedConv1D/index.html deleted file mode 100644 index d191632a0..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/DilatedConv1D/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -DilatedConv1D (owl-base.Owl_neural_generic.Make_Embedded.Neuron.DilatedConv1D)

Module Neuron.DilatedConv1D

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).DilatedConv1D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/DilatedConv2D/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/DilatedConv2D/index.html deleted file mode 100644 index f4a05f134..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/DilatedConv2D/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -DilatedConv2D (owl-base.Owl_neural_generic.Make_Embedded.Neuron.DilatedConv2D)

Module Neuron.DilatedConv2D

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).DilatedConv2D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/DilatedConv3D/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/DilatedConv3D/index.html deleted file mode 100644 index 414001a61..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/DilatedConv3D/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -DilatedConv3D (owl-base.Owl_neural_generic.Make_Embedded.Neuron.DilatedConv3D)

Module Neuron.DilatedConv3D

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).DilatedConv3D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Dot/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Dot/index.html deleted file mode 100644 index a2a781498..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Dot/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Dot (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Dot)

Module Neuron.Dot

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Dot.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Dropout/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Dropout/index.html deleted file mode 100644 index f4aba9186..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Dropout/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Dropout (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Dropout)

Module Neuron.Dropout

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Dropout.neuron_typ = {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : float -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Embedding/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Embedding/index.html deleted file mode 100644 index b304dd8f3..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Embedding/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Embedding (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Embedding)

Module Neuron.Embedding

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Embedding.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable init_typ : Init.typ;
  3. mutable in_dim : int;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : ?inputs:int -> int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Flatten/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Flatten/index.html deleted file mode 100644 index f87b220b1..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Flatten/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Flatten (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Flatten)

Module Neuron.Flatten

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Flatten.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/FullyConnected/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/FullyConnected/index.html deleted file mode 100644 index 78a645399..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/FullyConnected/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -FullyConnected (owl-base.Owl_neural_generic.Make_Embedded.Neuron.FullyConnected)

Module Neuron.FullyConnected

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).FullyConnected.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable init_typ : Init.typ;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/GRU/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/GRU/index.html deleted file mode 100644 index 353423b32..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/GRU/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GRU (owl-base.Owl_neural_generic.Make_Embedded.Neuron.GRU)

Module Neuron.GRU

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).GRU.neuron_typ = {
  1. mutable wxz : Optimise.Algodiff.t;
  2. mutable whz : Optimise.Algodiff.t;
  3. mutable wxr : Optimise.Algodiff.t;
  4. mutable whr : Optimise.Algodiff.t;
  5. mutable wxh : Optimise.Algodiff.t;
  6. mutable whh : Optimise.Algodiff.t;
  7. mutable bz : Optimise.Algodiff.t;
  8. mutable br : Optimise.Algodiff.t;
  9. mutable bh : Optimise.Algodiff.t;
  10. mutable h : Optimise.Algodiff.t;
  11. mutable init_typ : Init.typ;
  12. mutable in_shape : int array;
  13. mutable out_shape : int array;
}
val create : ?time_steps:int -> ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/GaussianDropout/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/GaussianDropout/index.html deleted file mode 100644 index c16f0241e..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/GaussianDropout/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GaussianDropout (owl-base.Owl_neural_generic.Make_Embedded.Neuron.GaussianDropout)

Module Neuron.GaussianDropout

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).GaussianDropout.neuron_typ = {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : float -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/GaussianNoise/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/GaussianNoise/index.html deleted file mode 100644 index a02fa98ef..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/GaussianNoise/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GaussianNoise (owl-base.Owl_neural_generic.Make_Embedded.Neuron.GaussianNoise)

Module Neuron.GaussianNoise

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).GaussianNoise.neuron_typ = {
  1. mutable sigma : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : float -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/GlobalAvgPool1D/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/GlobalAvgPool1D/index.html deleted file mode 100644 index 286dd7929..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/GlobalAvgPool1D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GlobalAvgPool1D (owl-base.Owl_neural_generic.Make_Embedded.Neuron.GlobalAvgPool1D)

Module Neuron.GlobalAvgPool1D

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).GlobalAvgPool1D.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/GlobalAvgPool2D/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/GlobalAvgPool2D/index.html deleted file mode 100644 index 3b1232dde..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/GlobalAvgPool2D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GlobalAvgPool2D (owl-base.Owl_neural_generic.Make_Embedded.Neuron.GlobalAvgPool2D)

Module Neuron.GlobalAvgPool2D

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).GlobalAvgPool2D.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/GlobalMaxPool1D/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/GlobalMaxPool1D/index.html deleted file mode 100644 index aee4345c7..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/GlobalMaxPool1D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GlobalMaxPool1D (owl-base.Owl_neural_generic.Make_Embedded.Neuron.GlobalMaxPool1D)

Module Neuron.GlobalMaxPool1D

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).GlobalMaxPool1D.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/GlobalMaxPool2D/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/GlobalMaxPool2D/index.html deleted file mode 100644 index 7bfd84d16..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/GlobalMaxPool2D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GlobalMaxPool2D (owl-base.Owl_neural_generic.Make_Embedded.Neuron.GlobalMaxPool2D)

Module Neuron.GlobalMaxPool2D

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).GlobalMaxPool2D.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Init/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Init/index.html deleted file mode 100644 index 5f1d15bac..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Init/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Init (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Init)

Module Neuron.Init

type typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Init.typ =
  1. | Uniform of float * float
  2. | Gaussian of float * float
  3. | Standard
  4. | Tanh
  5. | GlorotNormal
  6. | GlorotUniform
  7. | LecunNormal
  8. | HeNormal
  9. | Custom of (int array -> Optimise.Algodiff.t)
val calc_fans : int array -> float * float
val run : typ -> int array -> Optimise.Algodiff.t -> Optimise.Algodiff.t
val to_string : typ -> string
val to_name : unit -> string
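Sketch: an Init.typ value is passed through the ?init_typ argument of the Graph layer builders; e.g. in the Owl.Neural.S instantiation (the layer size and Gaussian parameters are illustrative):
(* Gaussian (mu, sigma) draws the initial weights from a Gaussian with those parameters. *)
open Owl
open Neural.S
open Neural.S.Graph

let layer x =
  linear 256 ~init_typ:(Init.Gaussian (0., 0.01)) ~act_typ:Activation.Relu x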
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Input/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Input/index.html deleted file mode 100644 index e45568e82..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Input/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Input (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Input)

Module Neuron.Input

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Input.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : int array -> neuron_typ
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/LSTM/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/LSTM/index.html deleted file mode 100644 index c3188fc09..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/LSTM/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -LSTM (owl-base.Owl_neural_generic.Make_Embedded.Neuron.LSTM)

Module Neuron.LSTM

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).LSTM.neuron_typ = {
  1. mutable wxi : Optimise.Algodiff.t;
  2. mutable whi : Optimise.Algodiff.t;
  3. mutable wxc : Optimise.Algodiff.t;
  4. mutable whc : Optimise.Algodiff.t;
  5. mutable wxf : Optimise.Algodiff.t;
  6. mutable whf : Optimise.Algodiff.t;
  7. mutable wxo : Optimise.Algodiff.t;
  8. mutable who : Optimise.Algodiff.t;
  9. mutable bi : Optimise.Algodiff.t;
  10. mutable bc : Optimise.Algodiff.t;
  11. mutable bf : Optimise.Algodiff.t;
  12. mutable bo : Optimise.Algodiff.t;
  13. mutable c : Optimise.Algodiff.t;
  14. mutable h : Optimise.Algodiff.t;
  15. mutable init_typ : Init.typ;
  16. mutable in_shape : int array;
  17. mutable out_shape : int array;
}
val create : ?time_steps:int -> ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
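Sketch: this LSTM neuron backs Graph.lstm; a small sequence model in the Owl.Neural.S instantiation (window size, vocabulary size and cell count are made up):
(* Token window -> embedding -> LSTM -> softmax classifier. *)
open Owl
open Neural.S
open Neural.S.Graph

let network =
  input [| 30 |]
  |> embedding 4000 40
  |> lstm 128
  |> linear 10 ~act_typ:Activation.(Softmax 1)
  |> get_network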
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Lambda/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Lambda/index.html deleted file mode 100644 index 1e93e7ee5..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Lambda/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Lambda (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Lambda)

Module Neuron.Lambda

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Lambda.neuron_typ = {
  1. mutable lambda : Optimise.Algodiff.t -> Optimise.Algodiff.t;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : ?out_shape:int array -> (Optimise.Algodiff.t -> Optimise.Algodiff.t) -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/LambdaArray/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/LambdaArray/index.html deleted file mode 100644 index 1ca7d3861..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/LambdaArray/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -LambdaArray (owl-base.Owl_neural_generic.Make_Embedded.Neuron.LambdaArray)

Module Neuron.LambdaArray

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).LambdaArray.neuron_typ = {
  1. mutable lambda : Optimise.Algodiff.t array -> Optimise.Algodiff.t;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : int array -> (Optimise.Algodiff.t array -> Optimise.Algodiff.t) -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Linear/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Linear/index.html deleted file mode 100644 index a6c1f5094..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Linear/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Linear (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Linear)

Module Neuron.Linear

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Linear.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable init_typ : Init.typ;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/LinearNoBias/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/LinearNoBias/index.html deleted file mode 100644 index cf88c53a2..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/LinearNoBias/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -LinearNoBias (owl-base.Owl_neural_generic.Make_Embedded.Neuron.LinearNoBias)

Module Neuron.LinearNoBias

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).LinearNoBias.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable init_typ : Init.typ;
  3. mutable in_shape : int array;
  4. mutable out_shape : int array;
}
val create : ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Masking/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Masking/index.html deleted file mode 100644 index d4389884f..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Masking/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Masking (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Masking)

Module Neuron.Masking

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Max/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Max/index.html deleted file mode 100644 index 703d09b22..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Max/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Max (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Max)

Module Neuron.Max

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Max.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/MaxPool1D/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/MaxPool1D/index.html deleted file mode 100644 index 0d56b68a6..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/MaxPool1D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -MaxPool1D (owl-base.Owl_neural_generic.Make_Embedded.Neuron.MaxPool1D)

Module Neuron.MaxPool1D

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).MaxPool1D.neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : Owl_types.padding -> int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/MaxPool2D/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/MaxPool2D/index.html deleted file mode 100644 index 1efed1b11..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/MaxPool2D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -MaxPool2D (owl-base.Owl_neural_generic.Make_Embedded.Neuron.MaxPool2D)

Module Neuron.MaxPool2D

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).MaxPool2D.neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : Owl_types.padding -> int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Mul/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Mul/index.html deleted file mode 100644 index 0fd799c9a..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Mul/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Mul (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Mul)

Module Neuron.Mul

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Mul.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Normalisation/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Normalisation/index.html deleted file mode 100644 index e0d288815..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Normalisation/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -Normalisation (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Normalisation)

Module Neuron.Normalisation

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Normalisation.neuron_typ = {
  1. mutable axis : int;
  2. mutable beta : Optimise.Algodiff.t;
  3. mutable gamma : Optimise.Algodiff.t;
  4. mutable mu : Optimise.Algodiff.t;
  5. mutable var : Optimise.Algodiff.t;
  6. mutable decay : Optimise.Algodiff.t;
  7. mutable training : bool;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}
val create : ?training:bool -> ?decay:float -> ?mu:Optimise.Algodiff.A.arr -> ?var:Optimise.Algodiff.A.arr -> int -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val load_weights : neuron_typ -> Optimise.Algodiff.t array -> unit
val save_weights : neuron_typ -> Optimise.Algodiff.t array
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
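A hedged sketch of the batch-normalisation neuron above: create takes the optional training flag, decay and running statistics, plus the axis to normalise over as its final int argument (the value -1 below is an illustrative assumption, as is the N alias).
let bn = N.Neuron.Normalisation.create ~training:true ~decay:0.99 (-1)
let () = N.Neuron.Normalisation.connect [| 32; 32; 16 |] bn
let trainable = N.Neuron.Normalisation.mkpar bn   (* trainable parameters, e.g. beta and gamma *)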
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/A/Linalg/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/A/Linalg/index.html deleted file mode 100644 index 0b7f701f8..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Algodiff.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/A/Mat/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/A/Mat/index.html deleted file mode 100644 index fee880d29..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Algodiff.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/A/Scalar/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/A/Scalar/index.html deleted file mode 100644 index 3533de423..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Algodiff.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/A/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/A/index.html deleted file mode 100644 index f0778ba1d..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/A/index.html +++ /dev/null @@ -1,160 +0,0 @@ - -A (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Algodiff.A)

Module Algodiff.A

val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
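A minimal sketch against the ndarray signature above: build a small array, square it element-wise through the Scalar submodule, and reduce it. Only values listed in this page are used; AD abbreviates an assumed instantiation of the enclosing Algodiff module, so AD.A is this module.
let () =
  let x = AD.A.sequential [| 2; 3 |] in          (* a 2x3 array filled with a ramp of values *)
  let y = AD.A.map AD.A.Scalar.sqr x in          (* element-wise square *)
  Printf.printf "sum of squares = %g\n" (AD.A.elt_to_float (AD.A.sum' y))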
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Arr/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Arr/index.html deleted file mode 100644 index 8e2e095cb..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Arr/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Arr (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Algodiff.Arr)

Module Algodiff.Arr

val empty : int array -> t
val zeros : int array -> t
val ones : int array -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int array -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int array -> t
val shape : t -> int array
val numel : t -> int
val reset : t -> unit
val reshape : t -> int array -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Builder/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Builder/index.html deleted file mode 100644 index 837cf8cf4..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Builder/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Builder (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Algodiff.Builder)

Module Algodiff.Builder

module type Siso = sig ... end
val build_siso : (module Siso) -> t -> t
module type Sipo = sig ... end
val build_sipo : (module Sipo) -> t -> t * t
module type Sito = sig ... end
val build_sito : (module Sito) -> t -> t * t * t
module type Siao = sig ... end
val build_siao : (module Siao) -> t -> t array
module type Piso = sig ... end
val build_piso : (module Piso) -> t -> t -> t
module type Aiso = sig ... end
val build_aiso : (module Aiso) -> t array -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Builder/module-type-Aiso/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Builder/module-type-Aiso/index.html deleted file mode 100644 index c11ac0247..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Builder/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Algodiff.Builder.Aiso)

Module type Builder.Aiso

val label : string
val ff : t array -> t
val df : int list -> t -> t array -> t array -> t
val dr : int list -> t array -> t -> t Stdlib.ref -> t list
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Builder/module-type-Piso/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Builder/module-type-Piso/index.html deleted file mode 100644 index 460c739af..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Builder/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Algodiff.Builder.Piso)

Module type Builder.Piso

val label : string
val ff_aa : A.elt -> A.elt -> t
val ff_ab : A.elt -> A.arr -> t
val ff_ba : A.arr -> A.elt -> t
val ff_bb : A.arr -> A.arr -> t
val df_da : t -> t -> t -> t -> t
val df_db : t -> t -> t -> t -> t
val df_dab : t -> t -> t -> t -> t -> t
val dr_ab : t -> t -> t -> t Stdlib.ref -> t * t
val dr_a : t -> t -> t -> t Stdlib.ref -> t
val dr_b : t -> t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Builder/module-type-Siao/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Builder/module-type-Siao/index.html deleted file mode 100644 index 6a99870ee..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Builder/module-type-Siao/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siao (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Algodiff.Builder.Siao)

Module type Builder.Siao

val label : string
val ff_f : A.elt -> t array
val ff_arr : A.arr -> t array
val df : t array -> t -> t -> t array
val dr : t -> t -> t Stdlib.ref array -> t Stdlib.ref array -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Builder/module-type-Sipo/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Builder/module-type-Sipo/index.html deleted file mode 100644 index cce5e8d84..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Builder/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Algodiff.Builder.Sipo)

Module type Builder.Sipo

val label : string
val ff_f : A.elt -> t * t
val ff_arr : A.arr -> t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Builder/module-type-Siso/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Builder/module-type-Siso/index.html deleted file mode 100644 index 02edaf23f..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Builder/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Algodiff.Builder.Siso)

Module type Builder.Siso

val label : string
val ff_f : A.elt -> t
val ff_arr : A.arr -> t
val df : t -> t -> t -> t
val dr : t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Builder/module-type-Sito/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Builder/module-type-Sito/index.html deleted file mode 100644 index 1d45fb46a..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Builder/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Algodiff.Builder.Sito)

Module type Builder.Sito

val label : string
val ff_f : A.elt -> t * t * t
val ff_arr : A.arr -> t * t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Linalg/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Linalg/index.html deleted file mode 100644 index ffc1fbc66..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Algodiff.Linalg)

Module Algodiff.Linalg

val inv : t -> t
val logdet : t -> t
val chol : ?upper:bool -> t -> t
val qr : t -> t * t
val lq : t -> t * t
val svd : ?thin:bool -> t -> t * t * t
val sylvester : t -> t -> t -> t
val lyapunov : t -> t -> t
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> t -> t -> t
val (/@) : t -> t -> t
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> t -> t -> t
val care : ?diag_r:bool -> t -> t -> t -> t -> t
val dare : ?diag_r:bool -> t -> t -> t -> t -> t
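A brief hedged sketch of the differentiable linear algebra above, assuming linsolve a b solves a *@ x = b as in Owl's usual Linalg conventions; AD again abbreviates an assumed instantiation of this Algodiff module.
let solve_and_grade a b =
  let x = AD.Linalg.linsolve a b in      (* result stays an AD value, so gradients can flow through *)
  AD.Maths.l2norm_sqr' x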
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Mat/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Mat/index.html deleted file mode 100644 index dd24581ec..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Algodiff.Mat)

Module Algodiff.Mat

val empty : int -> int -> t
val zeros : int -> int -> t
val eye : int -> t
val ones : int -> int -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int -> int -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int -> int -> t
val shape : t -> int * int
val numel : t -> int
val row_num : t -> int
val col_num : t -> int
val reset : t -> unit
val reshape : int -> int -> t -> t
val get : t -> int -> int -> t
val set : t -> int -> int -> t -> t
val row : t -> int -> t
val mean : t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
val map_by_row : (t -> t) -> t -> t
val of_arrays : A.elt array array -> t
val init_2d : int -> int -> (int -> int -> t) -> t
val print : t -> unit
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Maths/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Maths/index.html deleted file mode 100644 index 6c61e472b..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/Maths/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Maths (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Algodiff.Maths)

Module Algodiff.Maths

val (+) : t -> t -> t
val (-) : t -> t -> t
val (*) : t -> t -> t
val (/) : t -> t -> t
val (*@) : t -> t -> t
val (**) : t -> t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val kron : t -> t -> t
val dot : t -> t -> t
val pow : t -> t -> t
val atan2 : t -> t -> t
val min2 : t -> t -> t
val max2 : t -> t -> t
val cross_entropy : t -> t -> t
val inv : t -> t
val neg : t -> t
val abs : t -> t
val signum : t -> t
val floor : t -> t
val ceil : t -> t
val round : t -> t
val sqr : t -> t
val sqrt : t -> t
val log : t -> t
val log2 : t -> t
val log10 : t -> t
val exp : t -> t
val sin : t -> t
val cos : t -> t
val tan : t -> t
val sinh : t -> t
val cosh : t -> t
val tanh : t -> t
val asin : t -> t
val acos : t -> t
val atan : t -> t
val asinh : t -> t
val acosh : t -> t
val atanh : t -> t
val sum' : t -> t
val log_sum_exp' : t -> t
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> t -> t
val sum : ?axis:int -> ?keep_dims:bool -> t -> t
val sum_reduce : ?axis:int array -> t -> t
val mean : t -> t
val transpose : ?axis:int array -> t -> t
val swap : int -> int -> t -> t
val l1norm' : t -> t
val l2norm' : t -> t
val l2norm_sqr' : t -> t
val sigmoid : t -> t
val relu : t -> t
val dawsn : t -> t
val softplus : t -> t
val softsign : t -> t
val softmax : ?axis:int -> t -> t
val reshape : t -> int array -> t
val flatten : t -> t
val get_item : t -> int -> int -> t
val get_row : t -> int -> t
val concat : axis:int -> t -> t -> t
val split : axis:int -> int array -> t -> t array
val of_arrays : t array array -> t
val to_arrays : t -> t array array
val concatenate : axis:int -> t array -> t
val stack : axis:int -> t array -> t
val get_slice : int list list -> t -> t
val set_slice : int list list -> t -> t -> t
val get_fancy : Owl_types.index list -> t -> t
val set_fancy : Owl_types.index list -> t -> t -> t
val diag : ?k:int -> t -> t
val diagm : ?k:int -> t -> t
val trace : t -> t
val triu : ?k:int -> t -> t
val tril : ?k:int -> t -> t
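A short sketch composing the Maths operators listed above into a quadratic form; AD is the assumed instantiation of this Algodiff module, and a and x are AD values of compatible matrix shape.
(* x^T A x, written with the matrix product ( *@ ) and transpose from the signature above *)
let quadratic_form a x = AD.Maths.(transpose x *@ a *@ x)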
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/NN/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/NN/index.html deleted file mode 100644 index 7dc71fa4f..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/NN/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -NN (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Algodiff.NN)

Module Algodiff.NN

val dropout : ?rate:float -> t -> t
val conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val dilated_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val dilated_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val dilated_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val transpose_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val transpose_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val transpose_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val max_pool1d : Owl_types.padding -> t -> int array -> int array -> t
val max_pool2d : Owl_types.padding -> t -> int array -> int array -> t
val max_pool3d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool1d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool2d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool3d : Owl_types.padding -> t -> int array -> int array -> t
val upsampling2d : t -> int array -> t
val pad : ?v:A.elt -> int list list -> t -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/index.html deleted file mode 100644 index b1d3ba658..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Algodiff/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Algodiff (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Algodiff)

Module Optimise.Algodiff

module A : sig ... end
type t = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Optimise.Algodiff.t =
  1. | F of A.elt
  2. | Arr of A.arr
  3. | DF of t * t * int
  4. | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
val tag : unit -> int
val primal : t -> t
val primal' : t -> t
val zero : t -> t
val reset_zero : t -> t
val tangent : t -> t
val adjref : t -> t Stdlib.ref
val adjval : t -> t
val shape : t -> int array
val is_float : t -> bool
val is_arr : t -> bool
val row_num : t -> int
val col_num : t -> int
val numel : t -> int
val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t
val clip_by_l2norm : A.elt -> t -> t
val copy_primal' : t -> t
val tile : t -> int array -> t
val repeat : t -> int array -> t
val pack_elt : A.elt -> t
val unpack_elt : t -> A.elt
val pack_flt : float -> t
val _f : float -> t
val unpack_flt : t -> float
val pack_arr : A.arr -> t
val unpack_arr : t -> A.arr
val deep_info : t -> string
val type_info : t -> string
val error_binop : string -> t -> t -> 'a
val error_uniop : string -> t -> 'a
val make_forward : t -> t -> int -> t
val make_reverse : t -> int -> t
val reverse_prop : t -> t -> unit
val diff : (t -> t) -> t -> t
val diff' : (t -> t) -> t -> t * t
val grad : (t -> t) -> t -> t
val grad' : (t -> t) -> t -> t * t
val jacobian : (t -> t) -> t -> t
val jacobian' : (t -> t) -> t -> t * t
val jacobianv : (t -> t) -> t -> t -> t
val jacobianv' : (t -> t) -> t -> t -> t * t
val jacobianTv : (t -> t) -> t -> t -> t
val jacobianTv' : (t -> t) -> t -> t -> t * t
val hessian : (t -> t) -> t -> t
val hessian' : (t -> t) -> t -> t * t
val hessianv : (t -> t) -> t -> t -> t
val hessianv' : (t -> t) -> t -> t -> t * t
val laplacian : (t -> t) -> t -> t
val laplacian' : (t -> t) -> t -> t * t
val gradhessian : (t -> t) -> t -> t * t
val gradhessian' : (t -> t) -> t -> t * t * t
val gradhessianv : (t -> t) -> t -> t -> t * t
val gradhessianv' : (t -> t) -> t -> t -> t * t * t
module Builder : sig ... end
module Maths : sig ... end
module Linalg : sig ... end
module NN : sig ... end
module Mat : sig ... end
module Arr : sig ... end
val to_trace : t list -> string
val to_dot : t list -> string
val pp_num : Stdlib.Format.formatter -> t -> unit
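A minimal sketch of the differentiation entry points above: pack a float, differentiate a scalar function with diff, and unpack the result. AD stands for a hypothetical instantiation of this Algodiff module (e.g. reached through the enclosing functors), which this diff does not pin down.
let () =
  let f x = AD.Maths.(x * x + sin x) in            (* f x = x^2 + sin x over AD values *)
  let dfdx = AD.diff f (AD.pack_flt 1.0) in        (* derivative at x = 1.0 *)
  Printf.printf "f'(1.0) = %g\n" (AD.unpack_flt dfdx)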
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Batch/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Batch/index.html deleted file mode 100644 index 8a437d15b..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Batch/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Batch (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Batch)

Module Optimise.Batch

val run : typ -> Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t
val batches : typ -> Algodiff.t -> int
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Checkpoint/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Checkpoint/index.html deleted file mode 100644 index 4d0ca8a6a..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Checkpoint/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Checkpoint (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Checkpoint)

Module Optimise.Checkpoint

type state = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Optimise.Checkpoint.state = {
  1. mutable current_batch : int;
  2. mutable batches_per_epoch : int;
  3. mutable epochs : float;
  4. mutable batches : int;
  5. mutable loss : Algodiff.t array;
  6. mutable start_at : float;
  7. mutable stop : bool;
  8. mutable gs : Algodiff.t array array;
  9. mutable ps : Algodiff.t array array;
  10. mutable us : Algodiff.t array array;
  11. mutable ch : Algodiff.t array array array;
}
type typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Optimise.Checkpoint.typ =
  1. | Batch of int
  2. | Epoch of float
  3. | Custom of state -> unit
  4. | None
val init_state : int -> float -> state
val default_checkpoint_fun : (string -> 'a) -> 'a
val print_state_info : state -> unit
val print_summary : state -> unit
val run : typ -> (string -> unit) -> int -> Algodiff.t -> state -> unit
val to_string : typ -> string
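A hedged sketch of the Custom checkpoint constructor above: the callback receives the mutable state record and may, for instance, flip its stop flag to end training early. O abbreviates an assumed instantiation of this Optimise module.
let early_stop : O.Checkpoint.typ =
  O.Checkpoint.(Custom (fun s -> if s.current_batch > 1_000 then s.stop <- true))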
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Clipping/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Clipping/index.html deleted file mode 100644 index 6fb814733..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Clipping/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Clipping (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Clipping)

Module Optimise.Clipping

val run : typ -> Algodiff.t -> Algodiff.t
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Gradient/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Gradient/index.html deleted file mode 100644 index c4fba17fd..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Gradient/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -Gradient (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Gradient)

Module Optimise.Gradient

type typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Optimise.Gradient.typ =
  1. | GD
  2. | CG
  3. | CD
  4. | NonlinearCG
  5. | DaiYuanCG
  6. | NewtonCG
  7. | Newton
val run : typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Learning_Rate/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Learning_Rate/index.html deleted file mode 100644 index ddd1bce3b..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Learning_Rate/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Learning_Rate (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Learning_Rate)

Module Optimise.Learning_Rate

type typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Optimise.Learning_Rate.typ =
  1. | Adagrad of float
  2. | Const of float
  3. | Decay of float * float
  4. | Exp_decay of float * float
  5. | RMSprop of float * float
  6. | Adam of float * float * float
  7. | Schedule of float array
val run : typ -> int -> Algodiff.t -> Algodiff.t array -> Algodiff.t
val default : typ -> typ
val update_ch : typ -> Algodiff.t -> Algodiff.t array -> Algodiff.t array
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Loss/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Loss/index.html deleted file mode 100644 index f97ada9ed..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Loss/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Loss (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Loss)

Module Optimise.Loss

type typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Optimise.Loss.typ =
  1. | Hinge
  2. | L1norm
  3. | L2norm
  4. | Quadratic
  5. | Cross_entropy
  6. | Custom of Algodiff.t -> Algodiff.t -> Algodiff.t
val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
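A sketch of the Custom loss constructor above, using only Maths values that appear elsewhere in this diff; it sums absolute errors (an L1-style loss). O and its Algodiff submodule are assumed instantiations.
let abs_error_loss : O.Loss.typ =
  O.Loss.Custom (fun y y' -> O.Algodiff.Maths.(l1norm' (y - y')))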
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Momentum/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Momentum/index.html deleted file mode 100644 index a5d0c6c02..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Momentum/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Momentum (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Momentum)

Module Optimise.Momentum

val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Params/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Params/index.html deleted file mode 100644 index fa1c2b15f..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Params/index.html +++ /dev/null @@ -1,16 +0,0 @@ - -Params (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Params)

Module Optimise.Params

type typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Optimise.Params.typ = {
  1. mutable epochs : float;
  2. mutable batch : Batch.typ;
  3. mutable gradient : Gradient.typ;
  4. mutable loss : Loss.typ;
  5. mutable learning_rate : Learning_Rate.typ;
  6. mutable regularisation : Regularisation.typ;
  7. mutable momentum : Momentum.typ;
  8. mutable clipping : Clipping.typ;
  9. mutable stopping : Stopping.typ;
  10. mutable checkpoint : Checkpoint.typ;
  11. mutable verbosity : bool;
}
val default : unit -> typ
val config : ?batch:Batch.typ -> ?gradient:Gradient.typ -> ?loss:Loss.typ -> ?learning_rate:Learning_Rate.typ -> ?regularisation:Regularisation.typ -> ?momentum:Momentum.typ -> ?clipping:Clipping.typ -> ?stopping:Stopping.typ -> ?checkpoint:Checkpoint.typ -> ?verbosity:bool -> float -> typ
val to_string : typ -> string
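A sketch of assembling a Params.typ with config from the values above; every constructor used (GD, Quadratic, Adam) is listed in this diff, the trailing 50. is the positional epoch count, and O is an assumed instantiation.
let params : O.Params.typ =
  O.Params.config
    ~gradient:O.Gradient.GD
    ~loss:O.Loss.Quadratic
    ~learning_rate:(O.Learning_Rate.Adam (0.001, 0.9, 0.999))
    50.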
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Regularisation/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Regularisation/index.html deleted file mode 100644 index c164be4d2..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Regularisation/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Regularisation (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Regularisation)

Module Optimise.Regularisation

type typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Optimise.Regularisation.typ =
  1. | L1norm of float
  2. | L2norm of float
  3. | Elastic_net of float * float
  4. | None
val run : typ -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Stopping/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Stopping/index.html deleted file mode 100644 index 1621d3282..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Stopping/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Stopping (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Stopping)

Module Optimise.Stopping

val run : typ -> float -> bool
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Utils/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Utils/index.html deleted file mode 100644 index 4a59b1f59..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/Utils/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Utils (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise.Utils)

Module Optimise.Utils

val sample_num : Algodiff.t -> int
val draw_samples : Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t
val get_chunk : Algodiff.t -> Algodiff.t -> int -> int -> Algodiff.t * Algodiff.t
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/index.html deleted file mode 100644 index 3a79997d8..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Optimise/index.html +++ /dev/null @@ -1,31 +0,0 @@ - -Optimise (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Optimise)

Module Neuron.Optimise

module Algodiff : sig ... end
module Utils : sig ... end
module Learning_Rate : sig ... end
module Batch : sig ... end
module Loss : sig ... end
module Gradient : sig ... end
module Momentum : sig ... end
module Regularisation : sig ... end
module Clipping : sig ... end
module Stopping : sig ... end
module Checkpoint : sig ... end
module Params : sig ... end
val minimise_weight : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Checkpoint.state * Algodiff.t
val minimise_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t * Algodiff.t array array) -> (Algodiff.t -> Algodiff.t array array * Algodiff.t array array) -> (Algodiff.t array array -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state
val minimise_fun : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Checkpoint.state * Algodiff.t
val minimise_compiled_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> (unit -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state
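A hedged end-to-end sketch with minimise_fun above: minimise a scalar AD function starting from an initial point, reusing the params value sketched after the Params module. O and params are assumptions layered on this diff's signatures.
let () =
  let f x = O.Algodiff.Maths.(x * x) in                       (* convex toy objective *)
  let _state, x_min = O.minimise_fun params f (O.Algodiff.pack_flt 3.0) in
  Printf.printf "argmin ~ %g\n" (O.Algodiff.unpack_flt x_min)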
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Padding1D/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Padding1D/index.html deleted file mode 100644 index e2269c875..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Padding1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Padding1D (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Padding1D)

Module Neuron.Padding1D

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Padding2D/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Padding2D/index.html deleted file mode 100644 index 60e876396..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Padding2D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Padding2D (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Padding2D)

Module Neuron.Padding2D

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Padding2D.neuron_typ = {
  1. mutable padding : int array array;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : int array array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Padding3D/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Padding3D/index.html deleted file mode 100644 index 76eac08a2..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Padding3D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Padding3D (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Padding3D)

Module Neuron.Padding3D

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Recurrent/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Recurrent/index.html deleted file mode 100644 index a257ab26d..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Recurrent/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -Recurrent (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Recurrent)

Module Neuron.Recurrent

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Recurrent.neuron_typ = {
  1. mutable whh : Optimise.Algodiff.t;
  2. mutable wxh : Optimise.Algodiff.t;
  3. mutable why : Optimise.Algodiff.t;
  4. mutable bh : Optimise.Algodiff.t;
  5. mutable by : Optimise.Algodiff.t;
  6. mutable h : Optimise.Algodiff.t;
  7. mutable hiddens : int;
  8. mutable act : Activation.typ;
  9. mutable init_typ : Init.typ;
  10. mutable in_shape : int array;
  11. mutable out_shape : int array;
}
val create : ?time_steps:int -> ?inputs:int -> int -> int -> Activation.typ -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Reshape/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Reshape/index.html deleted file mode 100644 index f056e35b9..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Reshape/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Reshape (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Reshape)

Module Neuron.Reshape

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Reshape.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : ?inputs:int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Slice/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Slice/index.html deleted file mode 100644 index 3e95f4b98..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/Slice/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Slice (owl-base.Owl_neural_generic.Make_Embedded.Neuron.Slice)

Module Neuron.Slice

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).Slice.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
  3. mutable slice : int list list;
}
val create : int list list -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/TransposeConv1D/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/TransposeConv1D/index.html deleted file mode 100644 index 289f89732..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/TransposeConv1D/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -TransposeConv1D (owl-base.Owl_neural_generic.Make_Embedded.Neuron.TransposeConv1D)

Module Neuron.TransposeConv1D

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).TransposeConv1D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/TransposeConv2D/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/TransposeConv2D/index.html deleted file mode 100644 index ddc7f3926..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/TransposeConv2D/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -TransposeConv2D (owl-base.Owl_neural_generic.Make_Embedded.Neuron.TransposeConv2D)

Module Neuron.TransposeConv2D

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).TransposeConv2D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/TransposeConv3D/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/TransposeConv3D/index.html deleted file mode 100644 index fac2cf27e..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/TransposeConv3D/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -TransposeConv3D (owl-base.Owl_neural_generic.Make_Embedded.Neuron.TransposeConv3D)

Module Neuron.TransposeConv3D

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).TransposeConv3D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/UpSampling1D/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/UpSampling1D/index.html deleted file mode 100644 index f042c3be7..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/UpSampling1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -UpSampling1D (owl-base.Owl_neural_generic.Make_Embedded.Neuron.UpSampling1D)

Module Neuron.UpSampling1D

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/UpSampling2D/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/UpSampling2D/index.html deleted file mode 100644 index e2fb09be2..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/UpSampling2D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -UpSampling2D (owl-base.Owl_neural_generic.Make_Embedded.Neuron.UpSampling2D)

Module Neuron.UpSampling2D

type neuron_typ = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).UpSampling2D.neuron_typ = {
  1. mutable size : int array;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/UpSampling3D/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/UpSampling3D/index.html deleted file mode 100644 index 111713578..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/UpSampling3D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -UpSampling3D (owl-base.Owl_neural_generic.Make_Embedded.Neuron.UpSampling3D)

Module Neuron.UpSampling3D

\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/index.html b/owl-base/Owl_neural_generic/Make_Embedded/Neuron/index.html deleted file mode 100644 index 73ac6fec8..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/Neuron/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Neuron (owl-base.Owl_neural_generic.Make_Embedded.Neuron)

Module Make_Embedded.Neuron

module Optimise : sig ... end
module Init : sig ... end
module Input : sig ... end
module Activation : sig ... end
module Linear : sig ... end
module LinearNoBias : sig ... end
module Recurrent : sig ... end
module LSTM : sig ... end
module GRU : sig ... end
module Conv1D : sig ... end
module Conv2D : sig ... end
module Conv3D : sig ... end
module DilatedConv1D : sig ... end
module DilatedConv2D : sig ... end
module DilatedConv3D : sig ... end
module TransposeConv1D : sig ... end
module TransposeConv2D : sig ... end
module TransposeConv3D : sig ... end
module FullyConnected : sig ... end
module MaxPool1D : sig ... end
module MaxPool2D : sig ... end
module AvgPool1D : sig ... end
module AvgPool2D : sig ... end
module GlobalMaxPool1D : sig ... end
module GlobalMaxPool2D : sig ... end
module GlobalAvgPool1D : sig ... end
module GlobalAvgPool2D : sig ... end
module UpSampling1D : sig ... end
module UpSampling2D : sig ... end
module UpSampling3D : sig ... end
module Padding1D : sig ... end
module Padding2D : sig ... end
module Padding3D : sig ... end
module Lambda : sig ... end
module LambdaArray : sig ... end
module Dropout : sig ... end
module Reshape : sig ... end
module Flatten : sig ... end
module Slice : sig ... end
module Add : sig ... end
module Mul : sig ... end
module Dot : sig ... end
module Max : sig ... end
module Average : sig ... end
module Concatenate : sig ... end
module Normalisation : sig ... end
module GaussianNoise : sig ... end
module GaussianDropout : sig ... end
module AlphaDropout : sig ... end
module Embedding : sig ... end
module Masking : sig ... end
type neuron = Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A))).neuron =
  1. | Input of Input.neuron_typ
  2. | Linear of Linear.neuron_typ
  3. | LinearNoBias of LinearNoBias.neuron_typ
  4. | Embedding of Embedding.neuron_typ
  5. | LSTM of LSTM.neuron_typ
  6. | GRU of GRU.neuron_typ
  7. | Recurrent of Recurrent.neuron_typ
  8. | Conv1D of Conv1D.neuron_typ
  9. | Conv2D of Conv2D.neuron_typ
  10. | Conv3D of Conv3D.neuron_typ
  11. | DilatedConv1D of DilatedConv1D.neuron_typ
  12. | DilatedConv2D of DilatedConv2D.neuron_typ
  13. | DilatedConv3D of DilatedConv3D.neuron_typ
  14. | TransposeConv1D of TransposeConv1D.neuron_typ
  15. | TransposeConv2D of TransposeConv2D.neuron_typ
  16. | TransposeConv3D of TransposeConv3D.neuron_typ
  17. | FullyConnected of FullyConnected.neuron_typ
  18. | MaxPool1D of MaxPool1D.neuron_typ
  19. | MaxPool2D of MaxPool2D.neuron_typ
  20. | AvgPool1D of AvgPool1D.neuron_typ
  21. | AvgPool2D of AvgPool2D.neuron_typ
  22. | GlobalMaxPool1D of GlobalMaxPool1D.neuron_typ
  23. | GlobalMaxPool2D of GlobalMaxPool2D.neuron_typ
  24. | GlobalAvgPool1D of GlobalAvgPool1D.neuron_typ
  25. | GlobalAvgPool2D of GlobalAvgPool2D.neuron_typ
  26. | UpSampling2D of UpSampling2D.neuron_typ
  27. | Padding2D of Padding2D.neuron_typ
  28. | Dropout of Dropout.neuron_typ
  29. | Reshape of Reshape.neuron_typ
  30. | Flatten of Flatten.neuron_typ
  31. | Slice of Slice.neuron_typ
  32. | Lambda of Lambda.neuron_typ
  33. | LambdaArray of LambdaArray.neuron_typ
  34. | Activation of Activation.neuron_typ
  35. | GaussianNoise of GaussianNoise.neuron_typ
  36. | GaussianDropout of GaussianDropout.neuron_typ
  37. | AlphaDropout of AlphaDropout.neuron_typ
  38. | Normalisation of Normalisation.neuron_typ
  39. | Add of Add.neuron_typ
  40. | Mul of Mul.neuron_typ
  41. | Dot of Dot.neuron_typ
  42. | Max of Max.neuron_typ
  43. | Average of Average.neuron_typ
  44. | Concatenate of Concatenate.neuron_typ
val get_in_out_shape : neuron -> int array * int array
val get_in_shape : neuron -> int array
val get_out_shape : neuron -> int array
val connect : int array array -> neuron -> unit
val init : neuron -> unit
val reset : neuron -> unit
val mktag : int -> neuron -> unit
val mkpar : neuron -> Optimise.Algodiff.t array
val mkpri : neuron -> Optimise.Algodiff.t array
val mkadj : neuron -> Optimise.Algodiff.t array
val update : neuron -> Optimise.Algodiff.t array -> unit
val load_weights : neuron -> Optimise.Algodiff.t array -> unit
val save_weights : neuron -> Optimise.Algodiff.t array
val copy : neuron -> neuron
val to_string : neuron -> string
val to_name : neuron -> string
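Finally, a small sketch over the top-level neuron accessors above: a helper that formats any neuron's name and inferred output shape. N is again the assumed Make_Embedded instantiation, not something defined by this diff.
let describe (n : N.Neuron.neuron) =
  let shape =
    N.Neuron.get_out_shape n
    |> Array.to_list |> List.map string_of_int |> String.concat "; "
  in
  Printf.sprintf "%s with out_shape [| %s |]" (N.Neuron.to_name n) shape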
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/argument-1-A/Linalg/index.html b/owl-base/Owl_neural_generic/Make_Embedded/argument-1-A/Linalg/index.html deleted file mode 100644 index 27820ab4a..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/argument-1-A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_neural_generic.Make_Embedded.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/argument-1-A/Mat/index.html b/owl-base/Owl_neural_generic/Make_Embedded/argument-1-A/Mat/index.html deleted file mode 100644 index d0af63bee..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/argument-1-A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_neural_generic.Make_Embedded.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/argument-1-A/Scalar/index.html b/owl-base/Owl_neural_generic/Make_Embedded/argument-1-A/Scalar/index.html deleted file mode 100644 index 9a616012c..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/argument-1-A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_neural_generic.Make_Embedded.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/argument-1-A/index.html b/owl-base/Owl_neural_generic/Make_Embedded/argument-1-A/index.html deleted file mode 100644 index d7510ff15..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/argument-1-A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl-base.Owl_neural_generic.Make_Embedded.A)

Parameter Make_Embedded.A

include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
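
Because this parameter is only a signature, code written against it is normally a functor. Below is a minimal sketch that uses only operations listed above; the functor name and the signature path Owl_types_ndarray_algodiff.Sig are assumptions.

(* Sketch only: a functor over the parameter signature; any ndarray module
   satisfying it can be plugged in. *)
module Normalise (A : Owl_types_ndarray_algodiff.Sig) = struct
  (* scale an ndarray so that its elements sum to one *)
  let run x =
    let s = A.elt_to_float (A.sum' x) in
    A.div_scalar x (A.float_to_elt s)
end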
\ No newline at end of file diff --git a/owl-base/Owl_neural_generic/Make_Embedded/index.html b/owl-base/Owl_neural_generic/Make_Embedded/index.html deleted file mode 100644 index 2b074853d..000000000 --- a/owl-base/Owl_neural_generic/Make_Embedded/index.html +++ /dev/null @@ -1,247 +0,0 @@ - -Make_Embedded (owl-base.Owl_neural_generic.Make_Embedded)

Module Owl_neural_generic.Make_Embedded

Parameters

Signature

include sig ... end
module Neuron : sig ... end
type node = Owl_neural_graph.Make(Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A)))).node = {
  1. mutable name : string;
  2. mutable prev : node array;
  3. mutable next : node array;
  4. mutable neuron : Neuron.neuron;
  5. mutable output : Neuron.Optimise.Algodiff.t option;
  6. mutable network : network;
  7. mutable train : bool;
}
and network = Owl_neural_graph.Make(Owl_neural_neuron.Make(Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A)))).network = {
  1. mutable nnid : string;
  2. mutable size : int;
  3. mutable roots : node array;
  4. mutable outputs : node array;
  5. mutable topo : node array;
}
val make_network : ?nnid:string -> int -> node array -> node array -> network
val make_node : ?name:string -> ?train:bool -> node array -> node array -> Neuron.neuron -> Neuron.Optimise.Algodiff.t option -> network -> node
val get_roots : network -> node array
val get_outputs : network -> node array
val get_node : network -> string -> node
val get_network : ?name:string -> node -> network
val outputs : ?name:string -> node array -> network
val get_network_name : network -> string
val set_network_name : network -> string -> unit
val input_shape : network -> int array
val input_shapes : network -> int array array
val collect_output : node array -> Neuron.Optimise.Algodiff.t array
val connect_pair : node -> node -> unit
val connect_to_parents : node array -> node -> unit
val add_node : ?act_typ:Neuron.Activation.typ -> network -> node array -> node -> node
val init : network -> unit
val reset : network -> unit
val mktag : int -> network -> unit
val mkpar : network -> Neuron.Optimise.Algodiff.t array array
val mkpri : network -> Neuron.Optimise.Algodiff.t array array
val mkadj : network -> Neuron.Optimise.Algodiff.t array array
val update : network -> Neuron.Optimise.Algodiff.t array array -> unit
val run_inputs : Neuron.Optimise.Algodiff.t array -> network -> Neuron.Optimise.Algodiff.t array
val forward_inputs : network -> Neuron.Optimise.Algodiff.t array -> Neuron.Optimise.Algodiff.t array * Neuron.Optimise.Algodiff.t array array
val backward : network -> Neuron.Optimise.Algodiff.t -> Neuron.Optimise.Algodiff.t array array * Neuron.Optimise.Algodiff.t array array
val copy : network -> network
val _remove_training_nodes : network -> unit
val model_inputs : network -> Neuron.Optimise.Algodiff.A.arr array -> Neuron.Optimise.Algodiff.A.arr array
val input : ?name:string -> int array -> node
val inputs : ?names:string array -> int array array -> node array
val activation : ?name:string -> Neuron.Activation.typ -> node -> node
val linear : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> node -> node
val linear_nobias : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> node -> node
val embedding : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> int -> node -> node
val recurrent : ?name:string -> ?init_typ:Neuron.Init.typ -> act_typ:Neuron.Activation.typ -> int -> int -> node -> node
val lstm : ?name:string -> ?init_typ:Neuron.Init.typ -> int -> node -> node
val gru : ?name:string -> ?init_typ:Neuron.Init.typ -> int -> node -> node
val conv1d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val conv2d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val conv3d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val dilated_conv1d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> int array -> node -> node
val dilated_conv2d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> int array -> node -> node
val dilated_conv3d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> int array -> node -> node
val transpose_conv1d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val transpose_conv2d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val transpose_conv3d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val fully_connected : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> node -> node
val max_pool1d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val max_pool2d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val avg_pool1d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val avg_pool2d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val global_max_pool1d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node
val global_max_pool2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node
val global_avg_pool1d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node
val global_avg_pool2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node
val upsampling2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> int array -> node -> node
val padding2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> int array array -> node -> node
val dropout : ?name:string -> float -> node -> node
val gaussian_noise : ?name:string -> float -> node -> node
val gaussian_dropout : ?name:string -> float -> node -> node
val alpha_dropout : ?name:string -> float -> node -> node
val normalisation : ?name:string -> ?axis:int -> ?training:bool -> ?decay:float -> ?mu:Neuron.Optimise.Algodiff.A.arr -> ?var:Neuron.Optimise.Algodiff.A.arr -> node -> node
val reshape : ?name:string -> int array -> node -> node
val flatten : ?name:string -> node -> node
val slice : ?name:string -> int list list -> node -> node
val lambda : ?name:string -> ?act_typ:Neuron.Activation.typ -> ?out_shape:int array -> (Neuron.Optimise.Algodiff.t -> Neuron.Optimise.Algodiff.t) -> node -> node
val lambda_array : ?name:string -> ?act_typ:Neuron.Activation.typ -> int array -> (Neuron.Optimise.Algodiff.t array -> Neuron.Optimise.Algodiff.t) -> node array -> node
val add : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val mul : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val dot : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val max : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val average : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val concatenate : ?name:string -> ?act_typ:Neuron.Activation.typ -> int -> node array -> node
val to_string : network -> string
val pp_network : Stdlib.Format.formatter -> network -> unit
val print : network -> unit
val save : ?unsafe:bool -> network -> string -> unit
val load : string -> network
val save_weights : network -> string -> unit
val load_weights : network -> string -> unit
val make_subnetwork : ?copy:bool -> ?make_inputs:string array -> network -> string array -> network
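
For reference, a short sketch of how this graph-building API reads in practice; the functions used are the ones listed above, and the single-precision instantiation Owl.Neural.S.Graph from the full owl package is an assumed concrete instance.

(* Sketch only: a tiny convolutional classifier built with the API above,
   assuming Owl.Neural.S.Graph as the concrete instantiation. *)
open Owl.Neural.S
open Graph

let network =
  input [| 28; 28; 1 |]
  |> conv2d [| 3; 3; 1; 16 |] [| 1; 1 |] ~act_typ:Activation.Relu
  |> max_pool2d [| 2; 2 |] [| 2; 2 |]
  |> flatten
  |> linear 10 ~act_typ:(Activation.Softmax 1)
  |> get_network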
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/.dummy b/owl-base/Owl_neural_graph/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Activation/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Activation/index.html deleted file mode 100644 index a79a8d931..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Activation/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Activation (owl-base.Owl_neural_graph.Make.Neuron.Activation)

Module Neuron.Activation

type typ =
  1. | Elu
  2. | Relu
  3. | Sigmoid
  4. | HardSigmoid
  5. | Softmax of int
  6. | Softplus
  7. | Softsign
  8. | Tanh
  9. | Relu6
  10. | LeakyRelu of float
  11. | TRelu of float
  12. | Custom of Optimise.Algodiff.t -> Optimise.Algodiff.t
  13. | None
    (*

    Types of activation functions.

    *)
type neuron_typ = {
  1. mutable activation : typ;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val run_activation : Optimise.Algodiff.t -> typ -> Optimise.Algodiff.t

Run one specific activation function.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val activation_to_string : typ -> string

Return the name of a specific activation function.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

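As a concrete illustration of run_activation, a hedged sketch; the module paths Owl.Neural.S.Graph.Neuron.Activation and Owl.Algodiff.S from the full owl package are assumptions on top of the signature above.

(* Sketch only: apply ReLU to an Algodiff value through run_activation. *)
module A = Owl.Neural.S.Graph.Neuron.Activation

let () =
  let x = Owl.Algodiff.S.Mat.uniform 1 4 in   (* already packed as Algodiff.t *)
  let y = A.run_activation x A.Relu in
  ignore y
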
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Add/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Add/index.html deleted file mode 100644 index 9ff50802d..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Add/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Add (owl-base.Owl_neural_graph.Make.Neuron.Add)

Module Neuron.Add

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/AlphaDropout/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/AlphaDropout/index.html deleted file mode 100644 index d90dcb7d8..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/AlphaDropout/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -AlphaDropout (owl-base.Owl_neural_graph.Make.Neuron.AlphaDropout)

Module Neuron.AlphaDropout

type neuron_typ = {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : float -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Average/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Average/index.html deleted file mode 100644 index c8afe80da..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Average/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Average (owl-base.Owl_neural_graph.Make.Neuron.Average)

Module Neuron.Average

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/AvgPool1D/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/AvgPool1D/index.html deleted file mode 100644 index 4603fafe6..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/AvgPool1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -AvgPool1D (owl-base.Owl_neural_graph.Make.Neuron.AvgPool1D)

Module Neuron.AvgPool1D

type neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : Owl_types.padding -> int array -> int array -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/AvgPool2D/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/AvgPool2D/index.html deleted file mode 100644 index 443397b03..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/AvgPool2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -AvgPool2D (owl-base.Owl_neural_graph.Make.Neuron.AvgPool2D)

Module Neuron.AvgPool2D

type neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : Owl_types.padding -> int array -> int array -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Concatenate/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Concatenate/index.html deleted file mode 100644 index 624a04374..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Concatenate/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Concatenate (owl-base.Owl_neural_graph.Make.Neuron.Concatenate)

Module Neuron.Concatenate

type neuron_typ = {
  1. mutable axis : int;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : int -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Conv1D/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Conv1D/index.html deleted file mode 100644 index 296ba9275..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Conv1D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -Conv1D (owl-base.Owl_neural_graph.Make.Neuron.Conv1D)

Module Neuron.Conv1D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Conv2D/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Conv2D/index.html deleted file mode 100644 index e67e2052c..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Conv2D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -Conv2D (owl-base.Owl_neural_graph.Make.Neuron.Conv2D)

Module Neuron.Conv2D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Conv3D/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Conv3D/index.html deleted file mode 100644 index a8816cc51..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Conv3D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -Conv3D (owl-base.Owl_neural_graph.Make.Neuron.Conv3D)

Module Neuron.Conv3D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/DilatedConv1D/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/DilatedConv1D/index.html deleted file mode 100644 index a5b6611cd..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/DilatedConv1D/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -DilatedConv1D (owl-base.Owl_neural_graph.Make.Neuron.DilatedConv1D)

Module Neuron.DilatedConv1D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/DilatedConv2D/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/DilatedConv2D/index.html deleted file mode 100644 index c49701dbf..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/DilatedConv2D/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -DilatedConv2D (owl-base.Owl_neural_graph.Make.Neuron.DilatedConv2D)

Module Neuron.DilatedConv2D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/DilatedConv3D/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/DilatedConv3D/index.html deleted file mode 100644 index 770e3278f..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/DilatedConv3D/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -DilatedConv3D (owl-base.Owl_neural_graph.Make.Neuron.DilatedConv3D)

Module Neuron.DilatedConv3D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Dot/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Dot/index.html deleted file mode 100644 index 801a2f012..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Dot/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Dot (owl-base.Owl_neural_graph.Make.Neuron.Dot)

Module Neuron.Dot

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Dropout/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Dropout/index.html deleted file mode 100644 index f0d2ebeb3..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Dropout/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Dropout (owl-base.Owl_neural_graph.Make.Neuron.Dropout)

Module Neuron.Dropout

type neuron_typ = {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : float -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Embedding/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Embedding/index.html deleted file mode 100644 index d28a469cd..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Embedding/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Embedding (owl-base.Owl_neural_graph.Make.Neuron.Embedding)

Module Neuron.Embedding

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable init_typ : Init.typ;
  3. mutable in_dim : int;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int -> int -> int -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Flatten/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Flatten/index.html deleted file mode 100644 index 698c2270d..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Flatten/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Flatten (owl-base.Owl_neural_graph.Make.Neuron.Flatten)

Module Neuron.Flatten

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/FullyConnected/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/FullyConnected/index.html deleted file mode 100644 index 8b33d6060..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/FullyConnected/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -FullyConnected (owl-base.Owl_neural_graph.Make.Neuron.FullyConnected)

Module Neuron.FullyConnected

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable init_typ : Init.typ;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int -> int -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/GRU/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/GRU/index.html deleted file mode 100644 index c80e1a7e2..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/GRU/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GRU (owl-base.Owl_neural_graph.Make.Neuron.GRU)

Module Neuron.GRU

type neuron_typ = {
  1. mutable wxz : Optimise.Algodiff.t;
  2. mutable whz : Optimise.Algodiff.t;
  3. mutable wxr : Optimise.Algodiff.t;
  4. mutable whr : Optimise.Algodiff.t;
  5. mutable wxh : Optimise.Algodiff.t;
  6. mutable whh : Optimise.Algodiff.t;
  7. mutable bz : Optimise.Algodiff.t;
  8. mutable br : Optimise.Algodiff.t;
  9. mutable bh : Optimise.Algodiff.t;
  10. mutable h : Optimise.Algodiff.t;
  11. mutable init_typ : Init.typ;
  12. mutable in_shape : int array;
  13. mutable out_shape : int array;
}

Neuron type definition.

val create : ?time_steps:int -> ?inputs:int -> int -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/GaussianDropout/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/GaussianDropout/index.html deleted file mode 100644 index 3885111fb..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/GaussianDropout/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GaussianDropout (owl-base.Owl_neural_graph.Make.Neuron.GaussianDropout)

Module Neuron.GaussianDropout

type neuron_typ = {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : float -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/GaussianNoise/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/GaussianNoise/index.html deleted file mode 100644 index e67c79268..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/GaussianNoise/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GaussianNoise (owl-base.Owl_neural_graph.Make.Neuron.GaussianNoise)

Module Neuron.GaussianNoise

type neuron_typ = {
  1. mutable sigma : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : float -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/GlobalAvgPool1D/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/GlobalAvgPool1D/index.html deleted file mode 100644 index e69e1fa12..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/GlobalAvgPool1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GlobalAvgPool1D (owl-base.Owl_neural_graph.Make.Neuron.GlobalAvgPool1D)

Module Neuron.GlobalAvgPool1D

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/GlobalAvgPool2D/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/GlobalAvgPool2D/index.html deleted file mode 100644 index d0961b67d..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/GlobalAvgPool2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GlobalAvgPool2D (owl-base.Owl_neural_graph.Make.Neuron.GlobalAvgPool2D)

Module Neuron.GlobalAvgPool2D

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/GlobalMaxPool1D/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/GlobalMaxPool1D/index.html deleted file mode 100644 index e0f193a1f..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/GlobalMaxPool1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GlobalMaxPool1D (owl-base.Owl_neural_graph.Make.Neuron.GlobalMaxPool1D)

Module Neuron.GlobalMaxPool1D

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/GlobalMaxPool2D/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/GlobalMaxPool2D/index.html deleted file mode 100644 index fd87b5d04..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/GlobalMaxPool2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GlobalMaxPool2D (owl-base.Owl_neural_graph.Make.Neuron.GlobalMaxPool2D)

Module Neuron.GlobalMaxPool2D

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Init/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Init/index.html deleted file mode 100644 index 0cf981481..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Init/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Init (owl-base.Owl_neural_graph.Make.Neuron.Init)

Module Neuron.Init

type typ =
  1. | Uniform of float * float
  2. | Gaussian of float * float
  3. | Standard
  4. | Tanh
  5. | GlorotNormal
  6. | GlorotUniform
  7. | LecunNormal
  8. | HeNormal
  9. | Custom of int array -> Optimise.Algodiff.t
    (*

    Initialisation types

    *)
val calc_fans : int array -> float * float

Calculate fan-in and fan-out of weights.

val run : typ -> int array -> Optimise.Algodiff.t -> Optimise.Algodiff.t

Execute the computation in this neuron.

val to_string : typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

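A small worked illustration of calc_fans for a 3x3 convolution kernel with 1 input and 32 output channels; the concrete module path Owl.Neural.S.Graph.Neuron.Init is an assumption.

(* Sketch only: fan-in/fan-out of a [|3;3;1;32|] kernel, as consumed by the
   Glorot/He initialisers listed above. *)
let () =
  let fan_in, fan_out =
    Owl.Neural.S.Graph.Neuron.Init.calc_fans [| 3; 3; 1; 32 |]
  in
  Printf.printf "fan_in = %g, fan_out = %g\n" fan_in fan_out
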
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Input/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Input/index.html deleted file mode 100644 index a216c41f4..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Input/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Input (owl-base.Owl_neural_graph.Make.Neuron.Input)

Module Neuron.Input

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : int array -> neuron_typ

Create the neuron.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/LSTM/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/LSTM/index.html deleted file mode 100644 index c1dac4790..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/LSTM/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -LSTM (owl-base.Owl_neural_graph.Make.Neuron.LSTM)

Module Neuron.LSTM

type neuron_typ = {
  1. mutable wxi : Optimise.Algodiff.t;
  2. mutable whi : Optimise.Algodiff.t;
  3. mutable wxc : Optimise.Algodiff.t;
  4. mutable whc : Optimise.Algodiff.t;
  5. mutable wxf : Optimise.Algodiff.t;
  6. mutable whf : Optimise.Algodiff.t;
  7. mutable wxo : Optimise.Algodiff.t;
  8. mutable who : Optimise.Algodiff.t;
  9. mutable bi : Optimise.Algodiff.t;
  10. mutable bc : Optimise.Algodiff.t;
  11. mutable bf : Optimise.Algodiff.t;
  12. mutable bo : Optimise.Algodiff.t;
  13. mutable c : Optimise.Algodiff.t;
  14. mutable h : Optimise.Algodiff.t;
  15. mutable init_typ : Init.typ;
  16. mutable in_shape : int array;
  17. mutable out_shape : int array;
}

Neuron type definition.

val create : ?time_steps:int -> ?inputs:int -> int -> Init.typ -> neuron_typ

Create the neuron.
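
A hedged creation sketch (it assumes the positional integer is the number of hidden units and that the functor has been instantiated so this module is in scope; the sizes are illustrative):

    (* hypothetical: an LSTM cell with 128 hidden units over 10 steps of 32 features *)
    let cell = create ~time_steps:10 ~inputs:32 128 Init.Tanh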

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Lambda/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Lambda/index.html deleted file mode 100644 index 87bcfa3b7..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Lambda/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Lambda (owl-base.Owl_neural_graph.Make.Neuron.Lambda)

Module Neuron.Lambda

type neuron_typ = {
  1. mutable lambda : Optimise.Algodiff.t -> Optimise.Algodiff.t;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : ?out_shape:int array -> (Optimise.Algodiff.t -> Optimise.Algodiff.t) -> neuron_typ

Create the neuron.
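
A minimal sketch of wrapping a custom function in a Lambda neuron (assuming the functor has been instantiated so this module is in scope):

    (* hypothetical: a Lambda neuron that squares its input element-wise *)
    let square = create (fun x -> Optimise.Algodiff.Maths.sqr x)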

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/LambdaArray/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/LambdaArray/index.html deleted file mode 100644 index 392f88d52..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/LambdaArray/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -LambdaArray (owl-base.Owl_neural_graph.Make.Neuron.LambdaArray)

Module Neuron.LambdaArray

type neuron_typ = {
  1. mutable lambda : Optimise.Algodiff.t array -> Optimise.Algodiff.t;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : int array -> (Optimise.Algodiff.t array -> Optimise.Algodiff.t) -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Linear/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Linear/index.html deleted file mode 100644 index 39cbd260d..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Linear/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Linear (owl-base.Owl_neural_graph.Make.Neuron.Linear)

Module Neuron.Linear

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable init_typ : Init.typ;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int -> int -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/LinearNoBias/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/LinearNoBias/index.html deleted file mode 100644 index 3ccce410f..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/LinearNoBias/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -LinearNoBias (owl-base.Owl_neural_graph.Make.Neuron.LinearNoBias)

Module Neuron.LinearNoBias

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable init_typ : Init.typ;
  3. mutable in_shape : int array;
  4. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int -> int -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Masking/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Masking/index.html deleted file mode 100644 index d29f6d891..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Masking/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Masking (owl-base.Owl_neural_graph.Make.Neuron.Masking)

Module Neuron.Masking

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Max/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Max/index.html deleted file mode 100644 index 3d563ce31..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Max/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Max (owl-base.Owl_neural_graph.Make.Neuron.Max)

Module Neuron.Max

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/MaxPool1D/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/MaxPool1D/index.html deleted file mode 100644 index b116ea3db..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/MaxPool1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -MaxPool1D (owl-base.Owl_neural_graph.Make.Neuron.MaxPool1D)

Module Neuron.MaxPool1D

type neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : Owl_types.padding -> int array -> int array -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/MaxPool2D/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/MaxPool2D/index.html deleted file mode 100644 index 8a2e71a39..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/MaxPool2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -MaxPool2D (owl-base.Owl_neural_graph.Make.Neuron.MaxPool2D)

Module Neuron.MaxPool2D

type neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : Owl_types.padding -> int array -> int array -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Mul/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Mul/index.html deleted file mode 100644 index cc5760cb7..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Mul/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mul (owl-base.Owl_neural_graph.Make.Neuron.Mul)

Module Neuron.Mul

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Normalisation/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Normalisation/index.html deleted file mode 100644 index ce9b8f935..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Normalisation/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -Normalisation (owl-base.Owl_neural_graph.Make.Neuron.Normalisation)

Module Neuron.Normalisation

type neuron_typ = {
  1. mutable axis : int;
  2. mutable beta : Optimise.Algodiff.t;
  3. mutable gamma : Optimise.Algodiff.t;
  4. mutable mu : Optimise.Algodiff.t;
  5. mutable var : Optimise.Algodiff.t;
  6. mutable decay : Optimise.Algodiff.t;
  7. mutable training : bool;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}

Neuron type definition.

val create : ?training:bool -> ?decay:float -> ?mu:Optimise.Algodiff.A.arr -> ?var:Optimise.Algodiff.A.arr -> int -> neuron_typ

Create the neuron. Note that axis 0 is the batch axis.
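
A hedged creation sketch (the axis and decay values are illustrative assumptions):

    (* hypothetical: normalise along axis 1, keeping running statistics with decay 0.99 *)
    let bn = create ~training:true ~decay:0.99 1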

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the trainable parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update trainable parameters of the neuron, used by Optimise module.

val load_weights : neuron_typ -> Optimise.Algodiff.t array -> unit

Load both trainable and non-trainable parameters into the neuron.

val save_weights : neuron_typ -> Optimise.Algodiff.t array

Assemble both trainable and non-trainable parameters of the neuron.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/A/Linalg/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/A/Linalg/index.html deleted file mode 100644 index 117d3e3ae..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Algodiff.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/A/Mat/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/A/Mat/index.html deleted file mode 100644 index 3bb88d20f..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Algodiff.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/A/Scalar/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/A/Scalar/index.html deleted file mode 100644 index b47a4dd87..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Algodiff.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/A/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/A/index.html deleted file mode 100644 index 52b9c0080..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Algodiff.A)

Module Algodiff.A

include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Arr/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Arr/index.html deleted file mode 100644 index 91b8ccee5..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Arr/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Arr (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Algodiff.Arr)

Module Algodiff.Arr

val empty : int array -> t
val zeros : int array -> t
val ones : int array -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int array -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int array -> t
val shape : t -> int array
val numel : t -> int
val reset : t -> unit
val reshape : t -> int array -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Builder/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Builder/index.html deleted file mode 100644 index df686d1f4..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Builder/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Builder (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Algodiff.Builder)

Module Algodiff.Builder

Ops Builder
module type Siso = sig ... end
val build_siso : (module Siso) -> t -> t

build single input single output operations

module type Sipo = sig ... end
val build_sipo : (module Sipo) -> t -> t * t

build single input pair outputs operations

module type Sito = sig ... end
val build_sito : (module Sito) -> t -> t * t * t

build single input triple outputs operations

module type Siao = sig ... end
val build_siao : (module Siao) -> t -> t array

build single input array output operations

module type Piso = sig ... end
val build_piso : (module Piso) -> t -> t -> t

build pair inputs single output operations

module type Aiso = sig ... end
val build_aiso : (module Aiso) -> t array -> t

build array input single output operations

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Builder/module-type-Aiso/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Builder/module-type-Aiso/index.html deleted file mode 100644 index f722fbe11..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Builder/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Algodiff.Builder.Aiso)

Module type Builder.Aiso

val label : string
val ff : t array -> t
val df : int list -> t -> t array -> t array -> t
val dr : int list -> t array -> t -> t Stdlib.ref -> t list
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Builder/module-type-Piso/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Builder/module-type-Piso/index.html deleted file mode 100644 index 3903456e9..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Builder/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Algodiff.Builder.Piso)

Module type Builder.Piso

val label : string
val ff_aa : A.elt -> A.elt -> t
val ff_ab : A.elt -> A.arr -> t
val ff_ba : A.arr -> A.elt -> t
val ff_bb : A.arr -> A.arr -> t
val df_da : t -> t -> t -> t -> t
val df_db : t -> t -> t -> t -> t
val df_dab : t -> t -> t -> t -> t -> t
val dr_ab : t -> t -> t -> t Stdlib.ref -> t * t
val dr_a : t -> t -> t -> t Stdlib.ref -> t
val dr_b : t -> t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Builder/module-type-Siao/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Builder/module-type-Siao/index.html deleted file mode 100644 index f480b7351..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Builder/module-type-Siao/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siao (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Algodiff.Builder.Siao)

Module type Builder.Siao

val label : string
val ff_f : A.elt -> t array
val ff_arr : A.arr -> t array
val df : t array -> t -> t -> t array
val dr : t -> t -> t Stdlib.ref array -> t Stdlib.ref array -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Builder/module-type-Sipo/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Builder/module-type-Sipo/index.html deleted file mode 100644 index 8df05ab54..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Builder/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Algodiff.Builder.Sipo)

Module type Builder.Sipo

val label : string
val ff_f : A.elt -> t * t
val ff_arr : A.arr -> t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Builder/module-type-Siso/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Builder/module-type-Siso/index.html deleted file mode 100644 index 3f2a6aa72..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Builder/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Algodiff.Builder.Siso)

Module type Builder.Siso

val label : string
val ff_f : A.elt -> t
val ff_arr : A.arr -> t
val df : t -> t -> t -> t
val dr : t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Builder/module-type-Sito/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Builder/module-type-Sito/index.html deleted file mode 100644 index 8089a13ae..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Builder/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Algodiff.Builder.Sito)

Module type Builder.Sito

val label : string
val ff_f : A.elt -> t * t * t
val ff_arr : A.arr -> t * t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Linalg/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Linalg/index.html deleted file mode 100644 index c653eb076..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Algodiff.Linalg)

Module Algodiff.Linalg

val inv : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val logdet : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val chol : ?upper:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val qr : t -> t * t

Refer to :doc:`owl_dense_ndarray_generic`

val lq : t -> t * t

Refer to :doc:`owl_dense_ndarray_generic`

val svd : ?thin:bool -> t -> t * t * t

Refer to :doc:`owl_dense_ndarray_generic`

val sylvester : t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val lyapunov : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (/@) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val care : ?diag_r:bool -> t -> t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dare : ?diag_r:bool -> t -> t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Mat/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Mat/index.html deleted file mode 100644 index 81ff51996..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Algodiff.Mat)

Module Algodiff.Mat

val empty : int -> int -> t
val zeros : int -> int -> t
val eye : int -> t
val ones : int -> int -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int -> int -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int -> int -> t
val shape : t -> int * int
val numel : t -> int
val row_num : t -> int
val col_num : t -> int
val reset : t -> unit
val reshape : int -> int -> t -> t
val get : t -> int -> int -> t
val set : t -> int -> int -> t -> t
val row : t -> int -> t
val mean : t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
val map_by_row : (t -> t) -> t -> t
val of_arrays : A.elt array array -> t
val init_2d : int -> int -> (int -> int -> t) -> t
val print : t -> unit
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Maths/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Maths/index.html deleted file mode 100644 index 208adef2c..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/Maths/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Maths (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Algodiff.Maths)

Module Algodiff.Maths

val (+) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (-) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (*) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (/) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (*@) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (**) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val add : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sub : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val mul : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val div : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val kron : t -> t -> t

Refer to :doc:`owl_dense_matrix_generic`

val dot : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val pow : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atan2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val min2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cross_entropy : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val inv : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val neg : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val abs : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val signum : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val floor : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val ceil : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val round : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sqr : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sqrt : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log2 : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log10 : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val exp : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sin : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cos : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tan : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sinh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cosh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tanh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val asin : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val acos : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atan : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val asinh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val acosh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atanh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp : ?axis:int -> ?keep_dims:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum : ?axis:int -> ?keep_dims:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum_reduce : ?axis:int array -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val mean : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose : ?axis:int array -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val swap : int -> int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l1norm' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm_sqr' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sigmoid : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val relu : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dawsn : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softplus : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softsign : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softmax : ?axis:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val reshape : t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val flatten : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_item : t -> int -> int -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_row : t -> int -> t

Refer to :doc:`owl_dense_ndarray_generic`

val concat : axis:int -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val split : axis:int -> int array -> t -> t array

Refer to :doc:`owl_dense_ndarray_generic`

val of_arrays : t array array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val to_arrays : t -> t array array

Refer to :doc:`owl_dense_ndarray_generic`

val concatenate : axis:int -> t array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val stack : axis:int -> t array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_slice : int list list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val set_slice : int list list -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_fancy : Owl_types.index list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val set_fancy : Owl_types.index list -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val diag : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val diagm : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val trace : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val triu : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tril : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/NN/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/NN/index.html deleted file mode 100644 index 1e52fdd22..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/NN/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -NN (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Algodiff.NN)

Module Algodiff.NN

val dropout : ?rate:float -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool1d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool2d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool3d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool1d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool2d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool3d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val upsampling2d : t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val pad : ?v:A.elt -> int list list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/index.html deleted file mode 100644 index 31a04b8b0..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Algodiff/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -Algodiff (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Algodiff)

Module Optimise.Algodiff

include Owl_algodiff_core_sig.Sig
Type definition
include Owl_algodiff_types_sig.Sig with type elt := A.elt and type arr := A.arr
type t =
  1. | F of A.elt
  2. | Arr of A.arr
  3. | DF of t * t * int
  4. | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
Core functions
val tag : unit -> int

TODO

val primal : t -> t

TODO

val primal' : t -> t

TODO

val zero : t -> t

TODO

val reset_zero : t -> t

TODO

val tangent : t -> t

TODO

val adjref : t -> t Stdlib.ref

TODO

val adjval : t -> t

TODO

val shape : t -> int array

TODO

val is_float : t -> bool

TODO

val is_arr : t -> bool

TODO

val row_num : t -> int

number of rows

val col_num : t -> int

number of columns

val numel : t -> int

number of elements

val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t

other functions, without tracking gradient

val clip_by_l2norm : A.elt -> t -> t

other functions, without tracking gradient

val copy_primal' : t -> t

TODO

val tile : t -> int array -> t

TODO

val repeat : t -> int array -> t

TODO

val pack_elt : A.elt -> t

convert from elt type to t type.

val unpack_elt : t -> A.elt

convert from t type to elt type.

val pack_flt : float -> t

convert from float type to t type.

val _f : float -> t

A shortcut function for F A.(float_to_elt x).
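
A minimal sketch (assuming this Algodiff module is bound as AD):

    let half  = AD._f 0.5                      (* same as F (A.float_to_elt 0.5) *)
    let three = AD.Maths.(half + AD._f 2.5)    (* scalar arithmetic on packed floats *)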

val unpack_flt : t -> float

convert from t type to float type.

val pack_arr : A.arr -> t

convert from arr type to t type.

val unpack_arr : t -> A.arr

convert from t type to arr type.

val deep_info : t -> string

TODO

val type_info : t -> string

TODO

val error_binop : string -> t -> t -> 'a

TODO

val error_uniop : string -> t -> 'a

TODO

val make_forward : t -> t -> int -> t

TODO

val make_reverse : t -> int -> t

TODO

val reverse_prop : t -> t -> unit

TODO

val diff : (t -> t) -> t -> t

diff f x returns the exact derivative of a function f : scalar -> scalar at point x. Simply calling diff f returns its derivative function g of the same type, i.e. g : scalar -> scalar.

Calling this function repeatedly gives higher-order derivatives of f, i.e. f |> diff |> diff |> diff |> ...
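
A minimal sketch (assuming this Algodiff module is bound as AD; the function and evaluation point are illustrative):

    let f x = AD.Maths.(sin x * x)
    let f'  = AD.diff f                              (* first derivative  *)
    let f'' = AD.diff (AD.diff f)                    (* second derivative *)
    let y   = AD.unpack_flt (f'' (AD.pack_flt 1.0))  (* evaluate at x = 1.0 *)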

val diff' : (t -> t) -> t -> t * t

similar to diff, but return (f x, diff f x).

val grad : (t -> t) -> t -> t

gradient of f : (vector -> scalar) at x, reverse ad.
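
A minimal sketch (assuming this Algodiff module is bound as AD; the function and input are illustrative):

    let f x = AD.Maths.(sum' (sqr x))      (* vector -> scalar *)
    let x = AD.Arr.uniform [| 1; 3 |]
    let g = AD.grad f x                    (* gradient, same shape as x *)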

val grad' : (t -> t) -> t -> t * t

similar to grad, but return (f x, grad f x).

val jacobian : (t -> t) -> t -> t

jacobian of f : (vector -> vector) at x, both x and y are row vectors.
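
A minimal sketch (assuming this Algodiff module is bound as AD; the linear map and input are illustrative):

    let w = AD.Mat.gaussian 3 2
    let f x = AD.Maths.(x *@ w)            (* row vector -> row vector *)
    let j = AD.jacobian f (AD.Mat.gaussian 1 3)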

val jacobian' : (t -> t) -> t -> t * t

similar to jacobian, but return (f x, jacobian f x)

val jacobianv : (t -> t) -> t -> t -> t

jacobian vector product of f : (vector -> vector) at x along v, forward ad. Namely, it calculates (jacobian f x) v

val jacobianv' : (t -> t) -> t -> t -> t * t

similar to jacobianv, but return (f x, jacobianv f x v)

val jacobianTv : (t -> t) -> t -> t -> t

transposed jacobian vector product of f : (vector -> vector) at x along v, backward ad. Namely, it calculates (transpose (jacobian f x)) v.

val jacobianTv' : (t -> t) -> t -> t -> t * t

similar to jacobianTv, but return (f x, jacobianTv f x v)

val hessian : (t -> t) -> t -> t

hessian of f : (vector -> scalar) at x.
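
A minimal sketch (assuming this Algodiff module is bound as AD; the function and point are illustrative):

    let f x = AD.Maths.(sum' (x * x * x))  (* vector -> scalar *)
    let h = AD.hessian f (AD.Mat.gaussian 1 3)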

val hessian' : (t -> t) -> t -> t * t

similar to hessian, but return (f x, hessian f x)

val hessianv : (t -> t) -> t -> t -> t

hessian vector product of f : (vector -> scalar) at x along v. Namely, it calculates (hessian f x) v.

val hessianv' : (t -> t) -> t -> t -> t * t

similar to hessianv, but return (f x, hessianv f x v).

val laplacian : (t -> t) -> t -> t

laplacian of f : (vector -> scalar) at x.

val laplacian' : (t -> t) -> t -> t * t

similar to laplacian, but return (f x, laplacian f x).

val gradhessian : (t -> t) -> t -> t * t

return (grad f x, hessian f x), f : (vector -> scalar)

val gradhessian' : (t -> t) -> t -> t * t * t

return (f x, grad f x, hessian f x)

val gradhessianv : (t -> t) -> t -> t -> t * t

return (grad f x v, hessian f x v)

val gradhessianv' : (t -> t) -> t -> t -> t * t * t

return (f x, grad f x v, hessian f x v)

include Owl_algodiff_ops_sig.Sig with type t := t and type elt := A.elt and type arr := A.arr and type op := op
module Builder : Owl_algodiff_ops_builder_sig.Sig with type t := t and type elt := A.elt and type arr := A.arr and type op := op
Supported Maths functions
module Maths : sig ... end
Supported Linalg functions
module Linalg : sig ... end
Supported Neural Network functions
module NN : sig ... end
Supported Mat functions
module Mat : sig ... end
Supported Arr functions
module Arr : sig ... end
Helper functions
include Owl_algodiff_graph_convert_sig.Sig with type t := t
val to_trace : t list -> string

to_trace [t0; t1; ...] outputs the trace of the computation graph to the terminal in a human-readable format.

val to_dot : t list -> string

to_dot [t0; t1; ...] outputs the trace of the computation graph in dot file format, which you can pass to other tools for further visualisation, such as Graphviz.
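
A minimal sketch of exporting a small reverse-mode graph (assuming this Algodiff module is bound as AD; the computation is illustrative):

    let x = AD.make_reverse (AD.Mat.gaussian 1 3) (AD.tag ())
    let y = AD.Maths.(sigmoid (x *@ AD.Mat.gaussian 3 1))
    let () = print_string (AD.to_dot [ y ])   (* feed the output to Graphviz *)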

val pp_num : Stdlib.Format.formatter -> t -> unit

pp_num t pretty prints the abstract number used in Algodiff.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Batch/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Batch/index.html deleted file mode 100644 index c5f68a2a6..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Batch/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Batch (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Batch)

Module Optimise.Batch

Batch module

type typ =
  1. | Full
  2. | Mini of int
  3. | Sample of int
  4. | Stochastic

Types of batches.

val run : typ -> Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t

Execute the computations defined in module typ.

val batches : typ -> Algodiff.t -> int

Return the total number of batches given a batch typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Checkpoint/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Checkpoint/index.html deleted file mode 100644 index 2d775aaf8..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Checkpoint/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Checkpoint (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Checkpoint)

Module Optimise.Checkpoint

Checkpoint module

type state = {
  1. mutable current_batch : int;
  2. mutable batches_per_epoch : int;
  3. mutable epochs : float;
  4. mutable batches : int;
  5. mutable loss : Algodiff.t array;
  6. mutable start_at : float;
  7. mutable stop : bool;
  8. mutable gs : Algodiff.t array array;
  9. mutable ps : Algodiff.t array array;
  10. mutable us : Algodiff.t array array;
  11. mutable ch : Algodiff.t array array array;
}

Type definition of checkpoint

type typ =
  1. | Batch of int
  2. | Epoch of float
  3. | Custom of state -> unit
  4. | None

Batch type.

val init_state : int -> float -> state

init_state batches_per_epoch epochs initialises a state by specifying the number of batches per epoch and the number of epochs in total.
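
A minimal sketch (the batch and epoch counts are illustrative):

    let state = init_state 500 10.     (* 500 batches per epoch, 10 epochs *)
    let () = print_summary state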

val default_checkpoint_fun : (string -> 'a) -> 'a

This function is used for saving intermediate files during optimisation.

val print_state_info : state -> unit

Print out the detail information of current state.

val print_summary : state -> unit

Print out the summary of current state.

val run : typ -> (string -> unit) -> int -> Algodiff.t -> state -> unit

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Clipping/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Clipping/index.html deleted file mode 100644 index a320fcb9a..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Clipping/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Clipping (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Clipping)

Module Optimise.Clipping

Clipping module

type typ =
  1. | L2norm of float
  2. | Value of float * float
  3. | None

Types of clipping functions.

val run : typ -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Gradient/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Gradient/index.html deleted file mode 100644 index c78a61d2c..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Gradient/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -Gradient (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Gradient)

Module Optimise.Gradient

Gradient module

type typ =
  1. | GD
  2. | CG
  3. | CD
  4. | NonlinearCG
  5. | DaiYuanCG
  6. | NewtonCG
  7. | Newton

Types of gradient function.

val run : typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Learning_Rate/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Learning_Rate/index.html deleted file mode 100644 index d57d92786..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Learning_Rate/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Learning_Rate (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Learning_Rate)

Module Optimise.Learning_Rate

Strategies for learning rate update

type typ =
  1. | Adagrad of float
  2. | Const of float
  3. | Decay of float * float
  4. | Exp_decay of float * float
  5. | RMSprop of float * float
  6. | Adam of float * float * float
  7. | Schedule of float array

Representation of learning rate update strategies. Possible values include:

  • Adam (alpha, beta1, beta2), see the referenced paper for the meaning of each parameter; a usage sketch follows below
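A minimal sketch of selecting a strategy; the coefficient values are illustrative only:

let lr = Adam (0.001, 0.9, 0.999)   (* alpha, beta1, beta2 *)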
val run : typ -> int -> Algodiff.t -> Algodiff.t array -> Algodiff.t

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val update_ch : typ -> Algodiff.t -> Algodiff.t array -> Algodiff.t array

Update the cache of gradients.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Loss/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Loss/index.html deleted file mode 100644 index 98ad780fc..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Loss/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Loss (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Loss)

Module Optimise.Loss

Loss module

type typ =
  1. | Hinge
  2. | L1norm
  3. | L2norm
  4. | Quadratic
  5. | Cross_entropy
  6. | Custom of (Algodiff.t -> Algodiff.t -> Algodiff.t)

Types of loss functions.

val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.
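For example, a minimal sketch; y and y' are assumed to be Algodiff values of matching shape, passed in the order given by the signature above (here y is taken to be the target and y' the prediction):

let l = run Cross_entropy y y'   (* cross-entropy between the two inputs *)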

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Momentum/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Momentum/index.html deleted file mode 100644 index dfe97ac85..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Momentum/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Momentum (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Momentum)

Module Optimise.Momentum

Momentum module

type typ =
  1. | Standard of float
  2. | Nesterov of float
  3. | None

Types of momentum functions.

val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Params/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Params/index.html deleted file mode 100644 index 731ef9162..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Params/index.html +++ /dev/null @@ -1,14 +0,0 @@ - -Params (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Params)

Module Optimise.Params

Params module

type typ = {
  1. mutable epochs : float;
  2. mutable batch : Batch.typ;
  3. mutable gradient : Gradient.typ;
  4. mutable loss : Loss.typ;
  5. mutable learning_rate : Learning_Rate.typ;
  6. mutable regularisation : Regularisation.typ;
  7. mutable momentum : Momentum.typ;
  8. mutable clipping : Clipping.typ;
  9. mutable stopping : Stopping.typ;
  10. mutable checkpoint : Checkpoint.typ;
  11. mutable verbosity : bool;
}

Type definition of parameter.

val default : unit -> typ

Create module typ with default values.

val config : ?batch:Batch.typ -> ?gradient:Gradient.typ -> ?loss:Loss.typ -> ?learning_rate:Learning_Rate.typ -> ?regularisation:Regularisation.typ -> ?momentum:Momentum.typ -> ?clipping:Clipping.typ -> ?stopping:Stopping.typ -> ?checkpoint:Checkpoint.typ -> ?verbosity:bool -> float -> typ

This function creates a parameter object with many configurations.
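A hypothetical sketch of building a parameter object; Batch.Mini is assumed to be a constructor of the Batch module (not shown on this page), the other values are illustrative, and the trailing float sets the epochs field:

let params =
  config
    ~batch:(Batch.Mini 128)                                   (* assumed mini-batch constructor *)
    ~learning_rate:(Learning_Rate.Adam (0.001, 0.9, 0.999))
    ~loss:Loss.Cross_entropy
    ~stopping:(Stopping.Const 1e-16)
    10.                                                       (* train for 10 epochs *)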

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Regularisation/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Regularisation/index.html deleted file mode 100644 index 890eabfc6..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Regularisation/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Regularisation (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Regularisation)

Module Optimise.Regularisation

Regularisation module

type typ =
  1. | L1norm of float
  2. | L2norm of float
  3. | Elastic_net of float * float
  4. | None

Types of regularisation functions.

val run : typ -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.
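For instance, a minimal sketch of computing an L2 penalty; w is assumed to be an Algodiff value holding the weights:

let penalty = run (L2norm 0.001) w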

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Stopping/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Stopping/index.html deleted file mode 100644 index 6e34552df..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Stopping/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Stopping (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Stopping)

Module Optimise.Stopping

Stopping module

type typ =
  1. | Const of float
  2. | Early of int * int
  3. | None

Types of stopping functions.

val run : typ -> float -> bool

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Utils/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Utils/index.html deleted file mode 100644 index 28688414a..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/Utils/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Utils (owl-base.Owl_neural_graph.Make.Neuron.Optimise.Utils)

Module Optimise.Utils

Utils module

val sample_num : Algodiff.t -> int

Return the total number of samples in the passed-in ndarray.

val draw_samples : Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t

draw_samples x y draws samples from both x (observations) and y (labels). The samples are drawn along axis 0, so x and y must agree along axis 0.

val get_chunk : Algodiff.t -> Algodiff.t -> int -> int -> Algodiff.t * Algodiff.t

get_chunk x y i c gets a contiguous chunk of c samples starting at position i from x (observations) and y (labels).
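A hypothetical sketch of the three functions together; x and y are assumed to be Algodiff values that agree along axis 0, and the numbers are illustrative:

let n = sample_num x                 (* total number of samples *)
let xs, ys = draw_samples x y 64     (* 64 randomly drawn samples *)
let xc, yc = get_chunk x y 128 64    (* 64 consecutive samples starting at position 128 *)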

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/index.html deleted file mode 100644 index c9ab7aead..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Optimise/index.html +++ /dev/null @@ -1,31 +0,0 @@ - -Optimise (owl-base.Owl_neural_graph.Make.Neuron.Optimise)

Module Neuron.Optimise

module Utils : sig ... end

Utils module

module Learning_Rate : sig ... end

Strategies for learning rate update

module Batch : sig ... end

Batch module

module Loss : sig ... end

Loss module

module Gradient : sig ... end

Gradient module

module Momentum : sig ... end

Momentum module

module Regularisation : sig ... end

Regularisation module

module Clipping : sig ... end

Clipping module

module Stopping : sig ... end

Stopping module

module Checkpoint : sig ... end

Checkpoint module

module Params : sig ... end

Params module

Core functions
val minimise_weight : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Checkpoint.state * Algodiff.t

This function minimises the weight w of the passed-in function f (see the sketch below).

  • f is a function f : w -> x -> y.
  • w is a row vector but y can have any shape.
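A hypothetical sketch, assuming params was built with Params.config and w0, x and y are Algodiff values of compatible shapes; the linear model below is only an example of a function of the required form:

let f w x = Algodiff.Maths.(x *@ transpose w)      (* predictions of a linear model *)
let state, w' = minimise_weight params f w0 x y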

val minimise_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t * Algodiff.t array array) -> (Algodiff.t -> Algodiff.t array array * Algodiff.t array array) -> (Algodiff.t array array -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state

This function is specifically designed for minimising the weights in a neural network of graph structure. In Owl's earlier versions, the functions in the regression module were actually implemented using this function.

val minimise_fun : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Checkpoint.state * Algodiff.t

This function minimises f : x -> y w.r.t. x.

x is an ndarray; y is a scalar value.
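A hypothetical sketch of minimising a simple sum of squares, assuming params was built with Params.config and x0 is the starting point packed as an Algodiff value:

let f x = Algodiff.Maths.(sum' (x * x))            (* scalar objective *)
let state, x_min = minimise_fun params f x0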

val minimise_compiled_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> (unit -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state

TODO

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Padding1D/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Padding1D/index.html deleted file mode 100644 index cd58b3280..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Padding1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Padding1D (owl-base.Owl_neural_graph.Make.Neuron.Padding1D)

Module Neuron.Padding1D

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Padding2D/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Padding2D/index.html deleted file mode 100644 index 17bc7479a..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Padding2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Padding2D (owl-base.Owl_neural_graph.Make.Neuron.Padding2D)

Module Neuron.Padding2D

type neuron_typ = {
  1. mutable padding : int array array;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : int array array -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Padding3D/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Padding3D/index.html deleted file mode 100644 index c07460aed..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Padding3D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Padding3D (owl-base.Owl_neural_graph.Make.Neuron.Padding3D)

Module Neuron.Padding3D

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Recurrent/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Recurrent/index.html deleted file mode 100644 index 96055d474..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Recurrent/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -Recurrent (owl-base.Owl_neural_graph.Make.Neuron.Recurrent)

Module Neuron.Recurrent

type neuron_typ = {
  1. mutable whh : Optimise.Algodiff.t;
  2. mutable wxh : Optimise.Algodiff.t;
  3. mutable why : Optimise.Algodiff.t;
  4. mutable bh : Optimise.Algodiff.t;
  5. mutable by : Optimise.Algodiff.t;
  6. mutable h : Optimise.Algodiff.t;
  7. mutable hiddens : int;
  8. mutable act : Activation.typ;
  9. mutable init_typ : Init.typ;
  10. mutable in_shape : int array;
  11. mutable out_shape : int array;
}

Neuron type definition.

val create : ?time_steps:int -> ?inputs:int -> int -> int -> Activation.typ -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjacent values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Reshape/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Reshape/index.html deleted file mode 100644 index df0146462..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Reshape/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Reshape (owl-base.Owl_neural_graph.Make.Neuron.Reshape)

Module Neuron.Reshape

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> int array -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Slice/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Slice/index.html deleted file mode 100644 index b3568f4b1..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/Slice/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Slice (owl-base.Owl_neural_graph.Make.Neuron.Slice)

Module Neuron.Slice

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
  3. mutable slice : int list list;
}

Neuron type definition.

val create : int list list -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/TransposeConv1D/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/TransposeConv1D/index.html deleted file mode 100644 index 463cc3454..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/TransposeConv1D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -TransposeConv1D (owl-base.Owl_neural_graph.Make.Neuron.TransposeConv1D)

Module Neuron.TransposeConv1D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjacent values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/TransposeConv2D/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/TransposeConv2D/index.html deleted file mode 100644 index d77de180f..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/TransposeConv2D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -TransposeConv2D (owl-base.Owl_neural_graph.Make.Neuron.TransposeConv2D)

Module Neuron.TransposeConv2D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjacent values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/TransposeConv3D/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/TransposeConv3D/index.html deleted file mode 100644 index ac8257b01..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/TransposeConv3D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -TransposeConv3D (owl-base.Owl_neural_graph.Make.Neuron.TransposeConv3D)

Module Neuron.TransposeConv3D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjacent values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/UpSampling1D/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/UpSampling1D/index.html deleted file mode 100644 index 8c8c1e79a..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/UpSampling1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -UpSampling1D (owl-base.Owl_neural_graph.Make.Neuron.UpSampling1D)

Module Neuron.UpSampling1D

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/UpSampling2D/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/UpSampling2D/index.html deleted file mode 100644 index 20b9a96bd..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/UpSampling2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -UpSampling2D (owl-base.Owl_neural_graph.Make.Neuron.UpSampling2D)

Module Neuron.UpSampling2D

type neuron_typ = {
  1. mutable size : int array;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : int array -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/UpSampling3D/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/UpSampling3D/index.html deleted file mode 100644 index ccbfca582..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/UpSampling3D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -UpSampling3D (owl-base.Owl_neural_graph.Make.Neuron.UpSampling3D)

Module Neuron.UpSampling3D

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/index.html b/owl-base/Owl_neural_graph/Make/argument-1-Neuron/index.html deleted file mode 100644 index 41dc6026f..000000000 --- a/owl-base/Owl_neural_graph/Make/argument-1-Neuron/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Neuron (owl-base.Owl_neural_graph.Make.Neuron)

Parameter Make.Neuron

Init neuron
module Init : sig ... end
Input neuron
module Input : sig ... end
Activation neuron
module Activation : sig ... end
Linear neuron
module Linear : sig ... end
LinearNoBias neuron
module LinearNoBias : sig ... end
Recurrent neuron
module Recurrent : sig ... end
LSTM neuron
module LSTM : sig ... end
GRU neuron
module GRU : sig ... end
Conv1D neuron
module Conv1D : sig ... end
Conv2D neuron
module Conv2D : sig ... end
Conv3D neuron
module Conv3D : sig ... end
DilatedConv1D neuron
module DilatedConv1D : sig ... end
DilatedConv2D neuron
module DilatedConv2D : sig ... end
DilatedConv3D neuron
module DilatedConv3D : sig ... end
TransposeConv1D neuron
module TransposeConv1D : sig ... end
TransposeConv2D neuron
module TransposeConv2D : sig ... end
TransposeConv3D neuron
module TransposeConv3D : sig ... end
FullyConnected neuron
module FullyConnected : sig ... end
MaxPool1D neuron
module MaxPool1D : sig ... end
MaxPool2D neuron
module MaxPool2D : sig ... end
AvgPool1D neuron
module AvgPool1D : sig ... end
AvgPool2D neuron
module AvgPool2D : sig ... end
GlobalMaxPool1D neuron
module GlobalMaxPool1D : sig ... end
GlobalMaxPool2D neuron
module GlobalMaxPool2D : sig ... end
GlobalAvgPool1D neuron
module GlobalAvgPool1D : sig ... end
GlobalAvgPool2D neuron
module GlobalAvgPool2D : sig ... end
UpSampling1D neuron
module UpSampling1D : sig ... end
UpSampling2D neuron
module UpSampling2D : sig ... end
UpSampling3D neuron
module UpSampling3D : sig ... end
Padding1D neuron
module Padding1D : sig ... end
Padding2D neuron
module Padding2D : sig ... end
Padding3D neuron
module Padding3D : sig ... end
Lambda neuron
module Lambda : sig ... end
LambdaArray neuron
module LambdaArray : sig ... end
Dropout neuron
module Dropout : sig ... end
Reshape neuron
module Reshape : sig ... end
Flatten neuron
module Flatten : sig ... end
Slice neuron
module Slice : sig ... end
Add neuron
module Add : sig ... end
Mul neuron
module Mul : sig ... end
Dot neuron
module Dot : sig ... end
Max neuron
module Max : sig ... end
Average neuron
module Average : sig ... end
Concatenate neuron
module Concatenate : sig ... end
Normalisation neuron
module Normalisation : sig ... end
GaussianNoise neuron
module GaussianNoise : sig ... end
GaussianDropout neuron
module GaussianDropout : sig ... end
AlphaDropout neuron
module AlphaDropout : sig ... end
Embedding neuron
module Embedding : sig ... end
Masking neuron
module Masking : sig ... end
Core functions
type neuron =
  1. | Input of Input.neuron_typ
  2. | Linear of Linear.neuron_typ
  3. | LinearNoBias of LinearNoBias.neuron_typ
  4. | Embedding of Embedding.neuron_typ
  5. | LSTM of LSTM.neuron_typ
  6. | GRU of GRU.neuron_typ
  7. | Recurrent of Recurrent.neuron_typ
  8. | Conv1D of Conv1D.neuron_typ
  9. | Conv2D of Conv2D.neuron_typ
  10. | Conv3D of Conv3D.neuron_typ
  11. | DilatedConv1D of DilatedConv1D.neuron_typ
  12. | DilatedConv2D of DilatedConv2D.neuron_typ
  13. | DilatedConv3D of DilatedConv3D.neuron_typ
  14. | TransposeConv1D of TransposeConv1D.neuron_typ
  15. | TransposeConv2D of TransposeConv2D.neuron_typ
  16. | TransposeConv3D of TransposeConv3D.neuron_typ
  17. | FullyConnected of FullyConnected.neuron_typ
  18. | MaxPool1D of MaxPool1D.neuron_typ
  19. | MaxPool2D of MaxPool2D.neuron_typ
  20. | AvgPool1D of AvgPool1D.neuron_typ
  21. | AvgPool2D of AvgPool2D.neuron_typ
  22. | GlobalMaxPool1D of GlobalMaxPool1D.neuron_typ
  23. | GlobalMaxPool2D of GlobalMaxPool2D.neuron_typ
  24. | GlobalAvgPool1D of GlobalAvgPool1D.neuron_typ
  25. | GlobalAvgPool2D of GlobalAvgPool2D.neuron_typ
  26. | UpSampling2D of UpSampling2D.neuron_typ
  27. | Padding2D of Padding2D.neuron_typ
  28. | Dropout of Dropout.neuron_typ
  29. | Reshape of Reshape.neuron_typ
  30. | Flatten of Flatten.neuron_typ
  31. | Slice of Slice.neuron_typ
  32. | Lambda of Lambda.neuron_typ
  33. | LambdaArray of LambdaArray.neuron_typ
  34. | Activation of Activation.neuron_typ
  35. | GaussianNoise of GaussianNoise.neuron_typ
  36. | GaussianDropout of GaussianDropout.neuron_typ
  37. | AlphaDropout of AlphaDropout.neuron_typ
  38. | Normalisation of Normalisation.neuron_typ
  39. | Add of Add.neuron_typ
  40. | Mul of Mul.neuron_typ
  41. | Dot of Dot.neuron_typ
  42. | Max of Max.neuron_typ
  43. | Average of Average.neuron_typ
  44. | Concatenate of Concatenate.neuron_typ

Types of neuron.

val get_in_out_shape : neuron -> int array * int array

Get both input and output shapes of a neuron.

val get_in_shape : neuron -> int array

Get the input shape of a neuron.

val get_out_shape : neuron -> int array

Get the output shape of a neuron.

val connect : int array array -> neuron -> unit

Connect this neuron to others in a neural network.

val init : neuron -> unit

Initialise the neuron and its parameters.

val reset : neuron -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron -> Optimise.Algodiff.t array

Assemble all the trainable parameters in an array, used by Optimise module.

val mkpri : neuron -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron -> Optimise.Algodiff.t array

Assemble all the adjacent values in an array, used by Optimise module.

val update : neuron -> Optimise.Algodiff.t array -> unit

Update trainable parameters in a neuron, used by Optimise module.

val load_weights : neuron -> Optimise.Algodiff.t array -> unit

Load both trainable and non-trainable parameters into the neuron.

val save_weights : neuron -> Optimise.Algodiff.t array

Assemble both trainable and non-trainable parameters of the neuron.

val copy : neuron -> neuron

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : neuron -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph/Make/index.html b/owl-base/Owl_neural_graph/Make/index.html deleted file mode 100644 index 2004fa51c..000000000 --- a/owl-base/Owl_neural_graph/Make/index.html +++ /dev/null @@ -1,243 +0,0 @@ - -Make (owl-base.Owl_neural_graph.Make)

Module Owl_neural_graph.Make

Parameters

Signature

module Neuron = Neuron
type node = {
  1. mutable name : string;
  2. mutable prev : node array;
  3. mutable next : node array;
  4. mutable neuron : Neuron.neuron;
  5. mutable output : Neuron.Optimise.Algodiff.t option;
  6. mutable network : network;
  7. mutable train : bool;
}
and network = {
  1. mutable nnid : string;
  2. mutable size : int;
  3. mutable roots : node array;
  4. mutable outputs : node array;
  5. mutable topo : node array;
}
val make_network : ?nnid:string -> int -> node array -> node array -> network
val make_node : ?name:string -> ?train:bool -> node array -> node array -> Neuron.neuron -> Neuron.Optimise.Algodiff.t option -> network -> node
val get_roots : network -> node array
val get_outputs : network -> node array
val get_node : network -> string -> node
val get_network : ?name:string -> node -> network
val outputs : ?name:string -> node array -> network
val get_network_name : network -> string
val set_network_name : network -> string -> unit
val input_shape : network -> int array
val input_shapes : network -> int array array
val collect_output : node array -> Neuron.Optimise.Algodiff.t array
val connect_pair : node -> node -> unit
val connect_to_parents : node array -> node -> unit
val add_node : ?act_typ:Neuron.Activation.typ -> network -> node array -> node -> node
val init : network -> unit
val reset : network -> unit
val mktag : int -> network -> unit
val mkpar : network -> Neuron.Optimise.Algodiff.t array array
val mkpri : network -> Neuron.Optimise.Algodiff.t array array
val mkadj : network -> Neuron.Optimise.Algodiff.t array array
val update : network -> Neuron.Optimise.Algodiff.t array array -> unit
val run_inputs : Neuron.Optimise.Algodiff.t array -> network -> Neuron.Optimise.Algodiff.t array
val forward_inputs : network -> Neuron.Optimise.Algodiff.t array -> Neuron.Optimise.Algodiff.t array * Neuron.Optimise.Algodiff.t array array
val backward : network -> Neuron.Optimise.Algodiff.t -> Neuron.Optimise.Algodiff.t array array * Neuron.Optimise.Algodiff.t array array
val copy : network -> network
val _remove_training_nodes : network -> unit
val model_inputs : network -> Neuron.Optimise.Algodiff.A.arr array -> Neuron.Optimise.Algodiff.A.arr array
val input : ?name:string -> int array -> node
val inputs : ?names:string array -> int array array -> node array
val activation : ?name:string -> Neuron.Activation.typ -> node -> node
val linear : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> node -> node
val linear_nobias : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> node -> node
val embedding : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> int -> node -> node
val recurrent : ?name:string -> ?init_typ:Neuron.Init.typ -> act_typ:Neuron.Activation.typ -> int -> int -> node -> node
val lstm : ?name:string -> ?init_typ:Neuron.Init.typ -> int -> node -> node
val gru : ?name:string -> ?init_typ:Neuron.Init.typ -> int -> node -> node
val conv1d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val conv2d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val conv3d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val dilated_conv1d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> int array -> node -> node
val dilated_conv2d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> int array -> node -> node
val dilated_conv3d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> int array -> node -> node
val transpose_conv1d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val transpose_conv2d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val transpose_conv3d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val fully_connected : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> node -> node
val max_pool1d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val max_pool2d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val avg_pool1d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val avg_pool2d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val global_max_pool1d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node
val global_max_pool2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node
val global_avg_pool1d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node
val global_avg_pool2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node
val upsampling2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> int array -> node -> node
val padding2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> int array array -> node -> node
val dropout : ?name:string -> float -> node -> node
val gaussian_noise : ?name:string -> float -> node -> node
val gaussian_dropout : ?name:string -> float -> node -> node
val alpha_dropout : ?name:string -> float -> node -> node
val normalisation : ?name:string -> ?axis:int -> ?training:bool -> ?decay:float -> ?mu:Neuron.Optimise.Algodiff.A.arr -> ?var:Neuron.Optimise.Algodiff.A.arr -> node -> node
val reshape : ?name:string -> int array -> node -> node
val flatten : ?name:string -> node -> node
val slice : ?name:string -> int list list -> node -> node
val lambda : ?name:string -> ?act_typ:Neuron.Activation.typ -> ?out_shape:int array -> (Neuron.Optimise.Algodiff.t -> Neuron.Optimise.Algodiff.t) -> node -> node
val lambda_array : ?name:string -> ?act_typ:Neuron.Activation.typ -> int array -> (Neuron.Optimise.Algodiff.t array -> Neuron.Optimise.Algodiff.t) -> node array -> node
val add : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val mul : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val dot : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val max : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val average : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val concatenate : ?name:string -> ?act_typ:Neuron.Activation.typ -> int -> node array -> node
val to_string : network -> string
val pp_network : Stdlib.Format.formatter -> network -> unit
val print : network -> unit
val save : ?unsafe:bool -> network -> string -> unit
val load : string -> network
val save_weights : network -> string -> unit
val load_weights : network -> string -> unit
val make_subnetwork : ?copy:bool -> ?make_inputs:string array -> network -> string array -> network
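To illustrate how these functions compose, a minimal sketch of building a small network against this signature; it assumes the functor has been instantiated as some module G, and the layer sizes are illustrative only (Relu and Softmax are constructors of Neuron.Activation.typ, listed later in this diff):

let nn =
  G.input [| 784 |]
  |> G.linear 300 ~act_typ:G.Neuron.Activation.Relu
  |> G.linear 10 ~act_typ:(G.Neuron.Activation.Softmax 1)
  |> G.get_network

The pipeline style works because each layer constructor takes the preceding node as its last argument and returns a new node; get_network then recovers the enclosing network record.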
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/.dummy b/owl-base/Owl_neural_graph_sig/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Activation/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Activation/index.html deleted file mode 100644 index 6adeb915c..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Activation/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Activation (owl-base.Owl_neural_graph_sig.Sig.Neuron.Activation)

Module Neuron.Activation

type typ =
  1. | Elu
  2. | Relu
  3. | Sigmoid
  4. | HardSigmoid
  5. | Softmax of int
  6. | Softplus
  7. | Softsign
  8. | Tanh
  9. | Relu6
  10. | LeakyRelu of float
  11. | TRelu of float
  12. | Custom of (Optimise.Algodiff.t -> Optimise.Algodiff.t)
  13. | None

Types of activation functions.

type neuron_typ = {
  1. mutable activation : typ;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val run_activation : Optimise.Algodiff.t -> typ -> Optimise.Algodiff.t

Run one specific activation function.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val activation_to_string : typ -> string

Return the name of a specific activation function.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Add/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Add/index.html deleted file mode 100644 index 4d57a0bec..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Add/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Add (owl-base.Owl_neural_graph_sig.Sig.Neuron.Add)

Module Neuron.Add

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/AlphaDropout/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/AlphaDropout/index.html deleted file mode 100644 index fe7e439c6..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/AlphaDropout/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -AlphaDropout (owl-base.Owl_neural_graph_sig.Sig.Neuron.AlphaDropout)

Module Neuron.AlphaDropout

type neuron_typ = {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : float -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Average/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Average/index.html deleted file mode 100644 index ba90cccf3..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Average/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Average (owl-base.Owl_neural_graph_sig.Sig.Neuron.Average)

Module Neuron.Average

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/AvgPool1D/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/AvgPool1D/index.html deleted file mode 100644 index 267c7dfff..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/AvgPool1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -AvgPool1D (owl-base.Owl_neural_graph_sig.Sig.Neuron.AvgPool1D)

Module Neuron.AvgPool1D

type neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : Owl_types.padding -> int array -> int array -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/AvgPool2D/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/AvgPool2D/index.html deleted file mode 100644 index 03683b616..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/AvgPool2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -AvgPool2D (owl-base.Owl_neural_graph_sig.Sig.Neuron.AvgPool2D)

Module Neuron.AvgPool2D

type neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : Owl_types.padding -> int array -> int array -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Concatenate/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Concatenate/index.html deleted file mode 100644 index 7f10769e4..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Concatenate/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Concatenate (owl-base.Owl_neural_graph_sig.Sig.Neuron.Concatenate)

Module Neuron.Concatenate

type neuron_typ = {
  1. mutable axis : int;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : int -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Conv1D/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Conv1D/index.html deleted file mode 100644 index 3eff861bc..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Conv1D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -Conv1D (owl-base.Owl_neural_graph_sig.Sig.Neuron.Conv1D)

Module Neuron.Conv1D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjacent values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Conv2D/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Conv2D/index.html deleted file mode 100644 index 4cc5ba537..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Conv2D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -Conv2D (owl-base.Owl_neural_graph_sig.Sig.Neuron.Conv2D)

Module Neuron.Conv2D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primial values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjacent values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Conv3D/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Conv3D/index.html deleted file mode 100644 index 99fc66ff4..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Conv3D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -Conv3D (owl-base.Owl_neural_graph_sig.Sig.Neuron.Conv3D)

Module Neuron.Conv3D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjacent values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/DilatedConv1D/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/DilatedConv1D/index.html deleted file mode 100644 index 71d59816e..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/DilatedConv1D/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -DilatedConv1D (owl-base.Owl_neural_graph_sig.Sig.Neuron.DilatedConv1D)

Module Neuron.DilatedConv1D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjacent values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/DilatedConv2D/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/DilatedConv2D/index.html deleted file mode 100644 index 4ce533f95..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/DilatedConv2D/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -DilatedConv2D (owl-base.Owl_neural_graph_sig.Sig.Neuron.DilatedConv2D)

Module Neuron.DilatedConv2D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjacent values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.
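
As a rough usage sketch (not taken from the Owl sources; the order of the three int array arguments is assumed to be kernel, stride and dilation rate, matching the record fields above), a DilatedConv2D neuron could be driven through its lifecycle like this:

    (* Hypothetical sketch, written where this Neuron module is in scope.
       SAME padding and the [kernel; stride; rate] argument order are assumptions. *)
    let () =
      let n =
        DilatedConv2D.create Owl_types.SAME
          [| 3; 3; 3; 16 |]   (* kernel: h, w, in_channels, out_channels -- assumed layout *)
          [| 1; 1 |]          (* stride *)
          [| 2; 2 |]          (* dilation rate *)
          Init.GlorotUniform
      in
      DilatedConv2D.connect [| 32; 32; 3 |] n;   (* propagate the input shape *)
      DilatedConv2D.init n;                      (* allocate and initialise w and b *)
      ignore (DilatedConv2D.mkpar n)             (* parameters handed to Optimise *)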

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/DilatedConv3D/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/DilatedConv3D/index.html deleted file mode 100644 index ed7fd0be4..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/DilatedConv3D/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -DilatedConv3D (owl-base.Owl_neural_graph_sig.Sig.Neuron.DilatedConv3D)

Module Neuron.DilatedConv3D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Dot/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Dot/index.html deleted file mode 100644 index 3888ee708..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Dot/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Dot (owl-base.Owl_neural_graph_sig.Sig.Neuron.Dot)

Module Neuron.Dot

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Dropout/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Dropout/index.html deleted file mode 100644 index a9676a6de..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Dropout/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Dropout (owl-base.Owl_neural_graph_sig.Sig.Neuron.Dropout)

Module Neuron.Dropout

type neuron_typ = {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : float -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.
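
For illustration only (a minimal sketch, not from the Owl sources), a dropout neuron carries no trainable parameters, so setting it up is just create and connect:

    let () =
      let d = Dropout.create 0.5 in     (* drop units with probability 0.5 *)
      Dropout.connect [| 256 |] d;      (* in_shape and out_shape become [|256|] *)
      print_endline (Dropout.to_string d)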

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Embedding/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Embedding/index.html deleted file mode 100644 index ba86621c7..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Embedding/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Embedding (owl-base.Owl_neural_graph_sig.Sig.Neuron.Embedding)

Module Neuron.Embedding

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable init_typ : Init.typ;
  3. mutable in_dim : int;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int -> int -> int -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Flatten/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Flatten/index.html deleted file mode 100644 index f87984963..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Flatten/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Flatten (owl-base.Owl_neural_graph_sig.Sig.Neuron.Flatten)

Module Neuron.Flatten

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/FullyConnected/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/FullyConnected/index.html deleted file mode 100644 index ea345c796..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/FullyConnected/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -FullyConnected (owl-base.Owl_neural_graph_sig.Sig.Neuron.FullyConnected)

Module Neuron.FullyConnected

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable init_typ : Init.typ;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int -> int -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.
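
A hedged sketch of the fully connected layer's lifecycle (the positional int is assumed to be the number of output units, and the resulting weight shapes are assumptions):

    let () =
      let fc = FullyConnected.create ~inputs:784 10 (Init.Gaussian (0., 0.01)) in
      FullyConnected.connect [| 784 |] fc;   (* propagate the input shape from the previous node *)
      FullyConnected.init fc;                (* allocate w and b using the chosen initialiser *)
      ignore (FullyConnected.mkpar fc)       (* [| w; b |] collected for Optimise *)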

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/GRU/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/GRU/index.html deleted file mode 100644 index 559f34978..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/GRU/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GRU (owl-base.Owl_neural_graph_sig.Sig.Neuron.GRU)

Module Neuron.GRU

type neuron_typ = {
  1. mutable wxz : Optimise.Algodiff.t;
  2. mutable whz : Optimise.Algodiff.t;
  3. mutable wxr : Optimise.Algodiff.t;
  4. mutable whr : Optimise.Algodiff.t;
  5. mutable wxh : Optimise.Algodiff.t;
  6. mutable whh : Optimise.Algodiff.t;
  7. mutable bz : Optimise.Algodiff.t;
  8. mutable br : Optimise.Algodiff.t;
  9. mutable bh : Optimise.Algodiff.t;
  10. mutable h : Optimise.Algodiff.t;
  11. mutable init_typ : Init.typ;
  12. mutable in_shape : int array;
  13. mutable out_shape : int array;
}

Neuron type definition.

val create : ?time_steps:int -> ?inputs:int -> int -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/GaussianDropout/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/GaussianDropout/index.html deleted file mode 100644 index 8c11b9faa..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/GaussianDropout/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GaussianDropout (owl-base.Owl_neural_graph_sig.Sig.Neuron.GaussianDropout)

Module Neuron.GaussianDropout

type neuron_typ = {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : float -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/GaussianNoise/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/GaussianNoise/index.html deleted file mode 100644 index d39a3c3f2..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/GaussianNoise/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GaussianNoise (owl-base.Owl_neural_graph_sig.Sig.Neuron.GaussianNoise)

Module Neuron.GaussianNoise

type neuron_typ = {
  1. mutable sigma : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : float -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/GlobalAvgPool1D/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/GlobalAvgPool1D/index.html deleted file mode 100644 index 8857c9e0c..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/GlobalAvgPool1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GlobalAvgPool1D (owl-base.Owl_neural_graph_sig.Sig.Neuron.GlobalAvgPool1D)

Module Neuron.GlobalAvgPool1D

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/GlobalAvgPool2D/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/GlobalAvgPool2D/index.html deleted file mode 100644 index fb058f419..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/GlobalAvgPool2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GlobalAvgPool2D (owl-base.Owl_neural_graph_sig.Sig.Neuron.GlobalAvgPool2D)

Module Neuron.GlobalAvgPool2D

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/GlobalMaxPool1D/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/GlobalMaxPool1D/index.html deleted file mode 100644 index b52837edb..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/GlobalMaxPool1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GlobalMaxPool1D (owl-base.Owl_neural_graph_sig.Sig.Neuron.GlobalMaxPool1D)

Module Neuron.GlobalMaxPool1D

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/GlobalMaxPool2D/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/GlobalMaxPool2D/index.html deleted file mode 100644 index b3a5041d3..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/GlobalMaxPool2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GlobalMaxPool2D (owl-base.Owl_neural_graph_sig.Sig.Neuron.GlobalMaxPool2D)

Module Neuron.GlobalMaxPool2D

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Init/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Init/index.html deleted file mode 100644 index 324c1c13a..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Init/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Init (owl-base.Owl_neural_graph_sig.Sig.Neuron.Init)

Module Neuron.Init

type typ =
  1. | Uniform of float * float
  2. | Gaussian of float * float
  3. | Standard
  4. | Tanh
  5. | GlorotNormal
  6. | GlorotUniform
  7. | LecunNormal
  8. | HeNormal
  9. | Custom of (int array -> Optimise.Algodiff.t)
    (* Initialisation types *)
val calc_fans : int array -> float * float

Calculate fan-in and fan-out of weights.

val run : typ -> int array -> Optimise.Algodiff.t -> Optimise.Algodiff.t

Execute the computation in this neuron.

val to_string : typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.
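
For illustration, initialiser values are just constructors of typ; Custom wraps a function from a shape to an Algodiff value (a sketch; module paths are written as they appear on this page, and using Optimise.Algodiff.Arr.ones to build the value is an assumption):

    let gaussian_init = Init.Gaussian (0., 0.01)    (* mean, standard deviation *)
    let uniform_init  = Init.Uniform (-0.1, 0.1)    (* lower and upper bound *)

    (* A constant-ones initialiser built from the shape passed to Custom. *)
    let ones_init = Init.Custom (fun shape -> Optimise.Algodiff.Arr.ones shape)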

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Input/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Input/index.html deleted file mode 100644 index 589aacd34..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Input/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Input (owl-base.Owl_neural_graph_sig.Sig.Neuron.Input)

Module Neuron.Input

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : int array -> neuron_typ

Create the neuron.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/LSTM/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/LSTM/index.html deleted file mode 100644 index 34622590c..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/LSTM/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -LSTM (owl-base.Owl_neural_graph_sig.Sig.Neuron.LSTM)

Module Neuron.LSTM

type neuron_typ = {
  1. mutable wxi : Optimise.Algodiff.t;
  2. mutable whi : Optimise.Algodiff.t;
  3. mutable wxc : Optimise.Algodiff.t;
  4. mutable whc : Optimise.Algodiff.t;
  5. mutable wxf : Optimise.Algodiff.t;
  6. mutable whf : Optimise.Algodiff.t;
  7. mutable wxo : Optimise.Algodiff.t;
  8. mutable who : Optimise.Algodiff.t;
  9. mutable bi : Optimise.Algodiff.t;
  10. mutable bc : Optimise.Algodiff.t;
  11. mutable bf : Optimise.Algodiff.t;
  12. mutable bo : Optimise.Algodiff.t;
  13. mutable c : Optimise.Algodiff.t;
  14. mutable h : Optimise.Algodiff.t;
  15. mutable init_typ : Init.typ;
  16. mutable in_shape : int array;
  17. mutable out_shape : int array;
}

Neuron type definition.

val create : ?time_steps:int -> ?inputs:int -> int -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.
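
A sketch of creating an LSTM cell (the positional int is assumed to be the number of hidden units, and the connected input shape of time steps by features is likewise an assumption):

    let () =
      let cell = LSTM.create ~time_steps:10 ~inputs:128 64 Init.Tanh in
      LSTM.connect [| 10; 128 |] cell;   (* assumed (time steps, features) input shape *)
      LSTM.init cell                     (* allocate the gate weights and biases *)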

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Lambda/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Lambda/index.html deleted file mode 100644 index 5bc3db86d..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Lambda/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Lambda (owl-base.Owl_neural_graph_sig.Sig.Neuron.Lambda)

Module Neuron.Lambda

type neuron_typ = {
  1. mutable lambda : Optimise.Algodiff.t -> Optimise.Algodiff.t;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : ?out_shape:int array -> (Optimise.Algodiff.t -> Optimise.Algodiff.t) -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.
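
A minimal sketch: Lambda wraps an arbitrary Algodiff function as a layer. Here the function is built from Optimise.Algodiff.Maths.sqr, which is listed later on this page; out_shape is left to its default.

    let () =
      let square = Lambda.create (fun x -> Optimise.Algodiff.Maths.sqr x) in
      Lambda.connect [| 32 |] square    (* element-wise function; out_shape left to default *)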

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/LambdaArray/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/LambdaArray/index.html deleted file mode 100644 index 68d64311d..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/LambdaArray/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -LambdaArray (owl-base.Owl_neural_graph_sig.Sig.Neuron.LambdaArray)

Module Neuron.LambdaArray

type neuron_typ = {
  1. mutable lambda : Optimise.Algodiff.t array -> Optimise.Algodiff.t;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : int array -> (Optimise.Algodiff.t array -> Optimise.Algodiff.t) -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Linear/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Linear/index.html deleted file mode 100644 index 95154a514..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Linear/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Linear (owl-base.Owl_neural_graph_sig.Sig.Neuron.Linear)

Module Neuron.Linear

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable init_typ : Init.typ;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int -> int -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/LinearNoBias/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/LinearNoBias/index.html deleted file mode 100644 index 024dbff88..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/LinearNoBias/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -LinearNoBias (owl-base.Owl_neural_graph_sig.Sig.Neuron.LinearNoBias)

Module Neuron.LinearNoBias

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable init_typ : Init.typ;
  3. mutable in_shape : int array;
  4. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int -> int -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Masking/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Masking/index.html deleted file mode 100644 index b29bfd967..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Masking/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Masking (owl-base.Owl_neural_graph_sig.Sig.Neuron.Masking)

Module Neuron.Masking

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Max/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Max/index.html deleted file mode 100644 index d5a9c77e9..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Max/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Max (owl-base.Owl_neural_graph_sig.Sig.Neuron.Max)

Module Neuron.Max

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/MaxPool1D/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/MaxPool1D/index.html deleted file mode 100644 index fa783611f..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/MaxPool1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -MaxPool1D (owl-base.Owl_neural_graph_sig.Sig.Neuron.MaxPool1D)

Module Neuron.MaxPool1D

type neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : Owl_types.padding -> int array -> int array -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/MaxPool2D/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/MaxPool2D/index.html deleted file mode 100644 index f136b6ec1..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/MaxPool2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -MaxPool2D (owl-base.Owl_neural_graph_sig.Sig.Neuron.MaxPool2D)

Module Neuron.MaxPool2D

type neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : Owl_types.padding -> int array -> int array -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Mul/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Mul/index.html deleted file mode 100644 index 47de4003f..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Mul/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mul (owl-base.Owl_neural_graph_sig.Sig.Neuron.Mul)

Module Neuron.Mul

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.
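
A hedged sketch of a combinator neuron such as Mul: connect takes one input shape per incoming node, and run combines the incoming Algodiff values (the neuron argument is unused, hence the 'a type). The inputs here are built with Optimise.Algodiff.Arr.ones, listed later on this page.

    let () =
      let x1 = Optimise.Algodiff.Arr.ones [| 16 |] in
      let x2 = Optimise.Algodiff.Arr.ones [| 16 |] in
      let m = Mul.create () in
      Mul.connect [| [| 16 |]; [| 16 |] |] m;   (* one shape per incoming node *)
      ignore (Mul.run [| x1; x2 |] m)           (* element-wise product of the inputs *)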

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Normalisation/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Normalisation/index.html deleted file mode 100644 index 474d3617a..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Normalisation/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -Normalisation (owl-base.Owl_neural_graph_sig.Sig.Neuron.Normalisation)

Module Neuron.Normalisation

type neuron_typ = {
  1. mutable axis : int;
  2. mutable beta : Optimise.Algodiff.t;
  3. mutable gamma : Optimise.Algodiff.t;
  4. mutable mu : Optimise.Algodiff.t;
  5. mutable var : Optimise.Algodiff.t;
  6. mutable decay : Optimise.Algodiff.t;
  7. mutable training : bool;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}

Neuron type definition.

val create : ?training:bool -> ?decay:float -> ?mu:Optimise.Algodiff.A.arr -> ?var:Optimise.Algodiff.A.arr -> int -> neuron_typ

Create the neuron. Note that axis 0 is the batch axis.
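
For example, a batch-normalisation neuron over the feature axis could be set up as follows (a sketch; the decay value and the shape are illustrative only):

    let () =
      (* Normalise along axis 1; axis 0 stays the batch axis as noted above. *)
      let bn = Normalisation.create ~training:true ~decay:0.99 1 in
      Normalisation.connect [| 128 |] bn;
      Normalisation.init bn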

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the trainable parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update trainable parameters of the neuron, used by Optimise module.

val load_weights : neuron_typ -> Optimise.Algodiff.t array -> unit

Load both trainable and non-trainable parameters into the neuron.

val save_weights : neuron_typ -> Optimise.Algodiff.t array

Assemble both trainable and non-trainable parameters of the neuron.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/A/Linalg/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/A/Linalg/index.html deleted file mode 100644 index b30e710ed..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Algodiff.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/A/Mat/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/A/Mat/index.html deleted file mode 100644 index 5a88de4ca..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Algodiff.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/A/Scalar/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/A/Scalar/index.html deleted file mode 100644 index eefbeb73b..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Algodiff.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/A/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/A/index.html deleted file mode 100644 index f4a273520..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Algodiff.A)

Module Algodiff.A

include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
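
A hedged sketch of the ndarray interface A: build random arrays and run a convolution. It assumes SAME is a constructor of Owl_types_common.padding, an NHWC layout for the input and HWIO for the kernel, and a two-element stride.

    let x = A.gaussian [| 1; 28; 28; 3 |]                      (* batch of one 28x28 RGB image *)
    let k = A.gaussian ~sigma:(A.float_to_elt 0.1) [| 3; 3; 3; 8 |]
    let y = A.conv2d ~padding:Owl_types_common.SAME x k [| 1; 1 |]
    let () =
      A.shape y
      |> Array.map string_of_int
      |> Array.to_list
      |> String.concat "x"
      |> Printf.printf "output shape: %s\n"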
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Arr/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Arr/index.html deleted file mode 100644 index 367666024..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Arr/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Arr (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Algodiff.Arr)

Module Algodiff.Arr

val empty : int array -> t
val zeros : int array -> t
val ones : int array -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int array -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int array -> t
val shape : t -> int array
val numel : t -> int
val reset : t -> unit
val reshape : t -> int array -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
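
A minimal sketch: these lifted ndarray helpers produce Algodiff values directly, so the results can flow through differentiation.

    let x = Arr.uniform [| 3; 4 |]
    let y = Arr.uniform [| 4; 2 |]
    let z = Arr.dot x y                        (* an Algodiff value of shape [|3;2|] *)
    let () = assert (Arr.shape z = [| 3; 2 |])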
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Builder/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Builder/index.html deleted file mode 100644 index e0995c9f7..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Builder/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Builder (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Algodiff.Builder)

Module Algodiff.Builder

Ops Builder
module type Siso = sig ... end
val build_siso : (module Siso) -> t -> t

build single input single output operations

module type Sipo = sig ... end
val build_sipo : (module Sipo) -> t -> t * t

build single input pair outputs operations

module type Sito = sig ... end
val build_sito : (module Sito) -> t -> t * t * t

build single input triple outputs operations

module type Siao = sig ... end
val build_siao : (module Siao) -> t -> t array

build single input array output operations

module type Piso = sig ... end
val build_piso : (module Piso) -> t -> t -> t

build pair inputs single output operations

module type Aiso = sig ... end
val build_aiso : (module Aiso) -> t array -> t

build array input single output operations
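
As a hedged sketch of build_siso, a custom cube operation could be assembled like this, assuming it is written where this Algodiff module is open (so Builder, Maths and A are in scope). The pack_elt, pack_arr and pack_flt helpers, and the (cp, ap, at) / (a, cp, ca) argument meanings of df and dr, are assumptions not documented on this page.

    module Cube = struct
      let label = "cube"
      let ff_f a = pack_elt A.Scalar.(mul a (mul a a))     (* forward on a scalar *)
      let ff_arr a = pack_arr A.(mul a (mul a a))          (* forward on an ndarray *)
      let df _cp ap at = Maths.(at * pack_flt 3. * sqr ap) (* tangent rule *)
      let dr a _cp ca = Maths.(!ca * pack_flt 3. * sqr a)  (* adjoint rule *)
    end

    let cube : t -> t = Builder.build_siso (module Cube : Builder.Siso)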

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Builder/module-type-Aiso/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Builder/module-type-Aiso/index.html deleted file mode 100644 index 5e9d79629..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Builder/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Algodiff.Builder.Aiso)

Module type Builder.Aiso

val label : string
val ff : t array -> t
val df : int list -> t -> t array -> t array -> t
val dr : int list -> t array -> t -> t Stdlib.ref -> t list
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Builder/module-type-Piso/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Builder/module-type-Piso/index.html deleted file mode 100644 index 49ed44a36..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Builder/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Algodiff.Builder.Piso)

Module type Builder.Piso

val label : string
val ff_aa : A.elt -> A.elt -> t
val ff_ab : A.elt -> A.arr -> t
val ff_ba : A.arr -> A.elt -> t
val ff_bb : A.arr -> A.arr -> t
val df_da : t -> t -> t -> t -> t
val df_db : t -> t -> t -> t -> t
val df_dab : t -> t -> t -> t -> t -> t
val dr_ab : t -> t -> t -> t Stdlib.ref -> t * t
val dr_a : t -> t -> t -> t Stdlib.ref -> t
val dr_b : t -> t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Builder/module-type-Siao/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Builder/module-type-Siao/index.html deleted file mode 100644 index 71b497595..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Builder/module-type-Siao/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siao (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Algodiff.Builder.Siao)

Module type Builder.Siao

val label : string
val ff_f : A.elt -> t array
val ff_arr : A.arr -> t array
val df : t array -> t -> t -> t array
val dr : t -> t -> t Stdlib.ref array -> t Stdlib.ref array -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Builder/module-type-Sipo/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Builder/module-type-Sipo/index.html deleted file mode 100644 index f77df5444..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Builder/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Algodiff.Builder.Sipo)

Module type Builder.Sipo

val label : string
val ff_f : A.elt -> t * t
val ff_arr : A.arr -> t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Builder/module-type-Siso/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Builder/module-type-Siso/index.html deleted file mode 100644 index cca7cf480..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Builder/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Algodiff.Builder.Siso)

Module type Builder.Siso

val label : string
val ff_f : A.elt -> t
val ff_arr : A.arr -> t
val df : t -> t -> t -> t
val dr : t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Builder/module-type-Sito/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Builder/module-type-Sito/index.html deleted file mode 100644 index 061fa1723..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Builder/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Algodiff.Builder.Sito)

Module type Builder.Sito

val label : string
val ff_f : A.elt -> t * t * t
val ff_arr : A.arr -> t * t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Linalg/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Linalg/index.html deleted file mode 100644 index 731ff1b0e..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Algodiff.Linalg)

Module Algodiff.Linalg

val inv : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val logdet : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val chol : ?upper:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val qr : t -> t * t

Refer to :doc:`owl_dense_ndarray_generic`

val lq : t -> t * t

Refer to :doc:`owl_dense_ndarray_generic`

val svd : ?thin:bool -> t -> t * t * t

Refer to :doc:`owl_dense_ndarray_generic`

val sylvester : t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val lyapunov : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (/@) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val care : ?diag_r:bool -> t -> t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dare : ?diag_r:bool -> t -> t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`
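
A brief sketch of a few Linalg routines applied to Algodiff values (a and b are assumed to be square and conformable matrices built elsewhere), so the results remain differentiable:

    let log_abs_det a = Linalg.logdet a        (* log-determinant *)
    let solve a b = Linalg.linsolve a b        (* x such that a times x equals b *)
    let lower a = Linalg.chol ~upper:false a   (* lower-triangular Cholesky factor *)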

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Mat/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Mat/index.html deleted file mode 100644 index e6332fbfb..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Algodiff.Mat)

Module Algodiff.Mat

val empty : int -> int -> t
val zeros : int -> int -> t
val eye : int -> t
val ones : int -> int -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int -> int -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int -> int -> t
val shape : t -> int * int
val numel : t -> int
val row_num : t -> int
val col_num : t -> int
val reset : t -> unit
val reshape : int -> int -> t -> t
val get : t -> int -> int -> t
val set : t -> int -> int -> t -> t
val row : t -> int -> t
val mean : t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
val map_by_row : (t -> t) -> t -> t
val of_arrays : A.elt array array -> t
val init_2d : int -> int -> (int -> int -> t) -> t
val print : t -> unit
\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Maths/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Maths/index.html deleted file mode 100644 index dd4241a73..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/Maths/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Maths (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Algodiff.Maths)

Module Algodiff.Maths

val (+) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (-) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (*) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (/) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (*@) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (**) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val add : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sub : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val mul : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val div : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val kron : t -> t -> t

Refer to :doc:`owl_dense_matrix_generic`

val dot : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val pow : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atan2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val min2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cross_entropy : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val inv : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val neg : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val abs : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val signum : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val floor : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val ceil : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val round : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sqr : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sqrt : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log2 : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log10 : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val exp : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sin : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cos : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tan : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sinh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cosh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tanh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val asin : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val acos : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atan : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val asinh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val acosh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atanh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp : ?axis:int -> ?keep_dims:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum : ?axis:int -> ?keep_dims:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum_reduce : ?axis:int array -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val mean : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose : ?axis:int array -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val swap : int -> int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l1norm' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm_sqr' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sigmoid : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val relu : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dawsn : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softplus : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softsign : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softmax : ?axis:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val reshape : t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val flatten : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_item : t -> int -> int -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_row : t -> int -> t

Refer to :doc:`owl_dense_ndarray_generic`

val concat : axis:int -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val split : axis:int -> int array -> t -> t array

Refer to :doc:`owl_dense_ndarray_generic`

val of_arrays : t array array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val to_arrays : t -> t array array

Refer to :doc:`owl_dense_ndarray_generic`

val concatenate : axis:int -> t array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val stack : axis:int -> t array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_slice : int list list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val set_slice : int list list -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_fancy : Owl_types.index list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val set_fancy : Owl_types.index list -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val diag : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val diagm : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val trace : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val triu : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tril : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/NN/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/NN/index.html deleted file mode 100644 index 94eef9a29..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/NN/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -NN (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Algodiff.NN)

Module Algodiff.NN

val dropout : ?rate:float -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool1d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool2d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool3d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool1d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool2d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool3d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val upsampling2d : t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val pad : ?v:A.elt -> int list list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/index.html deleted file mode 100644 index f01178fe9..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Algodiff/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -Algodiff (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Algodiff)

Module Optimise.Algodiff

include Owl_algodiff_core_sig.Sig
Type definition
include Owl_algodiff_types_sig.Sig with type elt := A.elt and type arr := A.arr
type t =
  1. | F of A.elt
  2. | Arr of A.arr
  3. | DF of t * t * int
  4. | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
Core functions
val tag : unit -> int

TODO

val primal : t -> t

TODO

val primal' : t -> t

TODO

val zero : t -> t

TODO

val reset_zero : t -> t

TODO

val tangent : t -> t

TODO

val adjref : t -> t Stdlib.ref

TODO

val adjval : t -> t

TODO

val shape : t -> int array

TODO

val is_float : t -> bool

TODO

val is_arr : t -> bool

TODO

val row_num : t -> int

number of rows

val col_num : t -> int

number of columns

val numel : t -> int

number of elements

val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t

other functions, without tracking gradient

val clip_by_l2norm : A.elt -> t -> t

other functions, without tracking gradient

val copy_primal' : t -> t

TODO

val tile : t -> int array -> t

TODO

val repeat : t -> int array -> t

TODO

val pack_elt : A.elt -> t

convert from elt type to t type.

val unpack_elt : t -> A.elt

convert from t type to elt type.

val pack_flt : float -> t

convert from float type to t type.

val _f : float -> t

A shortcut function for F A.(float_to_elt x).

val unpack_flt : t -> float

convert from t type to float type.

val pack_arr : A.arr -> t

convert from arr type to t type.

val unpack_arr : t -> A.arr

convert from t type to arr type.
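
For example, a minimal sketch of moving values across the t boundary with these helpers, assuming the concrete single-precision instantiation Owl.Algodiff.S (and Owl.Dense.Ndarray.S as the underlying arr type); neither module is part of this abstract signature:

module AD = Owl.Algodiff.S

let () =
  let x = AD.pack_flt 1.5 in                                    (* float -> t *)
  let a = AD.pack_arr Owl.Dense.Ndarray.S.(ones [| 2; 3 |]) in  (* arr -> t   *)
  let y = AD.Maths.(x * sum' a) in                              (* stays inside t *)
  Printf.printf "y = %g\n" (AD.unpack_flt y)                    (* t -> float *)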

val deep_info : t -> string

TODO

val type_info : t -> string

TODO

val error_binop : string -> t -> t -> 'a

TODO

val error_uniop : string -> t -> 'a

TODO

val make_forward : t -> t -> int -> t

TODO

val make_reverse : t -> int -> t

TODO

val reverse_prop : t -> t -> unit

TODO

val diff : (t -> t) -> t -> t

diff f x returns the exact derivative of a function f : scalar -> scalar at point x. Simply calling diff f returns its derivative function g of the same type, i.e. g : scalar -> scalar.

Calling this function repeatedly gives you higher-order derivatives of f, i.e. f |> diff |> diff |> diff |> ...
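
As an illustration, a small sketch using the concrete Owl.Algodiff.S instantiation (the module choice is an assumption; this signature is abstract):

module AD = Owl.Algodiff.S

(* f : scalar -> scalar *)
let f x = AD.Maths.(sin x + sqr x)

let () =
  let x = AD.pack_flt 2. in
  let d1 = AD.diff f x |> AD.unpack_flt in            (* cos 2. +. 4.      *)
  let d2 = AD.(diff (diff f)) x |> AD.unpack_flt in   (* -. (sin 2.) +. 2. *)
  Printf.printf "f'(2) = %g, f''(2) = %g\n" d1 d2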

val diff' : (t -> t) -> t -> t * t

similar to diff, but return (f x, diff f x).

val grad : (t -> t) -> t -> t

gradient of f : (vector -> scalar) at x, reverse ad.

val grad' : (t -> t) -> t -> t * t

similar to grad, but return (f x, grad f x).
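
A sketch of grad' on a vector -> scalar function, again assuming the concrete Owl.Algodiff.S instantiation:

module AD = Owl.Algodiff.S

let f x = AD.Maths.(l2norm_sqr' x)   (* sum of squares, vector -> scalar *)

let () =
  let x = AD.Mat.uniform 1 3 in
  let y, g = AD.grad' f x in         (* g = 2 * x, same shape as x *)
  Printf.printf "f x = %g\n" (AD.unpack_flt y);
  AD.Mat.print g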

val jacobian : (t -> t) -> t -> t

jacobian of f : (vector -> vector) at x, both x and y are row vectors.

val jacobian' : (t -> t) -> t -> t * t

similar to jacobian, but return (f x, jacobian f x)

val jacobianv : (t -> t) -> t -> t -> t

jacobian vector product of f : (vector -> vector) at x along v, forward ad. Namely, it calculates (jacobian f x) v.

val jacobianv' : (t -> t) -> t -> t -> t * t

similar to jacobianv, but return (f x, jacobianv f x v).

val jacobianTv : (t -> t) -> t -> t -> t

transposed jacobian vector product of f : (vector -> vector) at x along v, backward ad. Namely, it calculates (transpose (jacobian f x)) v.

val jacobianTv' : (t -> t) -> t -> t -> t * t

similar to jacobianTv, but return (f x, jacobianTv f x v).
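
The following sketch contrasts the forward-mode and reverse-mode products for a vector -> vector function (concrete Owl.Algodiff.S assumed):

module AD = Owl.Algodiff.S

(* f maps a 1 x 3 row vector to a 1 x 6 row vector *)
let f x = AD.Maths.(concat ~axis:1 (sin x) (sqr x))

let () =
  let x = AD.Mat.uniform 1 3 in
  let v = AD.Mat.ones 1 3 in         (* direction in the input space  *)
  let u = AD.Mat.ones 1 6 in         (* direction in the output space *)
  let jv  = AD.jacobianv f x v in    (* (jacobian f x) applied to v, forward ad            *)
  let jtv = AD.jacobianTv f x u in   (* (transpose (jacobian f x)) applied to u, reverse ad *)
  AD.Mat.print jv;
  AD.Mat.print jtv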

val hessian : (t -> t) -> t -> t

hessian of f : (vector -> scalar) at x.

val hessian' : (t -> t) -> t -> t * t

similar to hessian, but return (f x, hessian f x)

val hessianv : (t -> t) -> t -> t -> t

hessian vector product of f : (vector -> scalar) at x along v. Namely, it calculates (hessian f x) v.

val hessianv' : (t -> t) -> t -> t -> t * t

similar to hessianv, but return (f x, hessianv f x v).

val laplacian : (t -> t) -> t -> t

laplacian of f : (vector -> scalar) at x.

val laplacian' : (t -> t) -> t -> t * t

similar to laplacian, but return (f x, laplacian f x).
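
For instance, a sketch with a scalar-valued function of a row vector (concrete Owl.Algodiff.S assumed):

module AD = Owl.Algodiff.S

let f x = AD.Maths.(l2norm_sqr' x)   (* f x = sum of x_i squared *)

let () =
  let x = AD.Mat.uniform 1 3 in
  let h = AD.hessian f x in          (* 3 x 3 matrix, here 2 * identity *)
  let l = AD.laplacian f x in        (* trace of the Hessian, here 6    *)
  AD.Mat.print h;
  Printf.printf "laplacian = %g\n" (AD.unpack_flt l)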

val gradhessian : (t -> t) -> t -> t * t

return (grad f x, hessian f x), f : (vector -> scalar)

val gradhessian' : (t -> t) -> t -> t * t * t

return (f x, grad f x, hessian f x)

val gradhessianv : (t -> t) -> t -> t -> t * t

return (grad f x v, hessian f x v)

val gradhessianv' : (t -> t) -> t -> t -> t * t * t

return (f x, grad f x v, hessian f x v)

include Owl_algodiff_ops_sig.Sig with type t := t and type elt := A.elt and type arr := A.arr and type op := op
module Builder : Owl_algodiff_ops_builder_sig.Sig with type t := t and type elt := A.elt and type arr := A.arr and type op := op
Supported Maths functions
module Maths : sig ... end
Supported Linalg functions
module Linalg : sig ... end
Supported Neural Network functions
module NN : sig ... end
Supported Mat functions
module Mat : sig ... end
Supported Arr functions
module Arr : sig ... end
Helper functions
include Owl_algodiff_graph_convert_sig.Sig with type t := t
val to_trace : t list -> string

to_trace [t0; t1; ...] outputs the trace of computation graph on the terminal in a human-readable format.

val to_dot : t list -> string

to_dot [t0; t1; ...] outputs the trace of the computation graph in the dot file format, which you can visualise further with other tools such as Graphviz.

val pp_num : Stdlib.Format.formatter -> t -> unit

pp_num t pretty prints the abstract number used in Algodiff.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Batch/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Batch/index.html deleted file mode 100644 index 39967c4c9..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Batch/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Batch (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Batch)

Module Optimise.Batch

Batch module

type typ =
  1. | Full
  2. | Mini of int
  3. | Sample of int
  4. | Stochastic

Types of batches.

val run : typ -> Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t

Execute the computations defined in module typ.

val batches : typ -> Algodiff.t -> int

Return the total number of batches given a batch typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Checkpoint/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Checkpoint/index.html deleted file mode 100644 index 8270d39f3..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Checkpoint/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Checkpoint (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Checkpoint)

Module Optimise.Checkpoint

Checkpoint module

type state = {
  1. mutable current_batch : int;
  2. mutable batches_per_epoch : int;
  3. mutable epochs : float;
  4. mutable batches : int;
  5. mutable loss : Algodiff.t array;
  6. mutable start_at : float;
  7. mutable stop : bool;
  8. mutable gs : Algodiff.t array array;
  9. mutable ps : Algodiff.t array array;
  10. mutable us : Algodiff.t array array;
  11. mutable ch : Algodiff.t array array array;
}

Type definition of checkpoint

type typ =
  1. | Batch of int
  2. | Epoch of float
  3. | Custom of (state -> unit)
  4. | None

Batch type.

val init_state : int -> float -> state

init_state batches_per_epoch epochs initialises a state by specifying the number of batches per epoch and the number of epochs in total.

val default_checkpoint_fun : (string -> 'a) -> 'a

This function is used for saving intermediate files during optimisation.

val print_state_info : state -> unit

Print out the detailed information of the current state.

val print_summary : state -> unit

Print out the summary of current state.

val run : typ -> (string -> unit) -> int -> Algodiff.t -> state -> unit

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Clipping/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Clipping/index.html deleted file mode 100644 index e827dcbab..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Clipping/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Clipping (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Clipping)

Module Optimise.Clipping

Clipping module

type typ =
  1. | L2norm of float
  2. | Value of float * float
  3. | None

Types of clipping functions.

val run : typ -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Gradient/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Gradient/index.html deleted file mode 100644 index deea4abf1..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Gradient/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -Gradient (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Gradient)

Module Optimise.Gradient

Gradient module

type typ =
  1. | GD
  2. | CG
  3. | CD
  4. | NonlinearCG
  5. | DaiYuanCG
  6. | NewtonCG
  7. | Newton

Types of gradient function.

val run :
  typ ->
  (Algodiff.t -> Algodiff.t) ->
  Algodiff.t ->
  Algodiff.t ->
  Algodiff.t ->
  Algodiff.t ->
  Algodiff.t

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Learning_Rate/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Learning_Rate/index.html deleted file mode 100644 index ae881b877..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Learning_Rate/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Learning_Rate (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Learning_Rate)

Module Optimise.Learning_Rate

Strategies for learning rate update

type typ =
  1. | Adagrad of float
  2. | Const of float
  3. | Decay of float * float
  4. | Exp_decay of float * float
  5. | RMSprop of float * float
  6. | Adam of float * float * float
  7. | Schedule of float array

Representation of learning rate update strategies. Possible values include:

  • Adam (alpha, beta1, beta2), see ref for parameter meaning
val run : typ -> int -> Algodiff.t -> Algodiff.t array -> Algodiff.t

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val update_ch : typ -> Algodiff.t -> Algodiff.t array -> Algodiff.t array

Update the cache of gradients.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Loss/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Loss/index.html deleted file mode 100644 index f5074cf6e..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Loss/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Loss (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Loss)

Module Optimise.Loss

Loss module

type typ =
  1. | Hinge
  2. | L1norm
  3. | L2norm
  4. | Quadratic
  5. | Cross_entropy
  6. | Custom of (Algodiff.t -> Algodiff.t -> Algodiff.t)

Types of loss functions.
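
For example, a hypothetical mean-absolute-error loss supplied through the Custom constructor, written against the concrete Owl.Neural.S modules rather than this abstract signature:

open Owl
open Neural.S
open Neural.S.Algodiff

(* mean absolute error between the target y and the prediction y' *)
let mae_loss = Loss.Custom (fun y y' -> Maths.(mean (abs (y - y'))))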

val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Momentum/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Momentum/index.html deleted file mode 100644 index 0571ca1e5..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Momentum/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Momentum (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Momentum)

Module Optimise.Momentum

Momentum module

type typ =
  1. | Standard of float
  2. | Nesterov of float
  3. | None

Types of momentum functions.

val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Params/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Params/index.html deleted file mode 100644 index 34a53bc48..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Params/index.html +++ /dev/null @@ -1,14 +0,0 @@ - -Params (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Params)

Module Optimise.Params

Params module

type typ = {
  1. mutable epochs : float;
  2. mutable batch : Batch.typ;
  3. mutable gradient : Gradient.typ;
  4. mutable loss : Loss.typ;
  5. mutable learning_rate : Learning_Rate.typ;
  6. mutable regularisation : Regularisation.typ;
  7. mutable momentum : Momentum.typ;
  8. mutable clipping : Clipping.typ;
  9. mutable stopping : Stopping.typ;
  10. mutable checkpoint : Checkpoint.typ;
  11. mutable verbosity : bool;
}

Type definition of parameter.

val default : unit -> typ

Create module typ with default values.

val config :
  ?batch:Batch.typ ->
  ?gradient:Gradient.typ ->
  ?loss:Loss.typ ->
  ?learning_rate:Learning_Rate.typ ->
  ?regularisation:Regularisation.typ ->
  ?momentum:Momentum.typ ->
  ?clipping:Clipping.typ ->
  ?stopping:Stopping.typ ->
  ?checkpoint:Checkpoint.typ ->
  ?verbosity:bool ->
  float ->
  typ

This function creates a parameter object with many configurations.
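
A sketch of how config is typically used in Owl's examples (the concrete Owl.Neural.S modules are assumed here):

open Owl
open Neural.S

let params =
  Params.config
    ~batch:(Batch.Mini 100)
    ~learning_rate:(Learning_Rate.Adagrad 0.005)
    ~loss:Loss.Cross_entropy
    ~stopping:(Stopping.Const 1e-6)
    10.   (* the final float argument is the number of epochs *)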

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Regularisation/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Regularisation/index.html deleted file mode 100644 index f7042d603..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Regularisation/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Regularisation (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Regularisation)

Module Optimise.Regularisation

Regularisation module

type typ =
  1. | L1norm of float
  2. | L2norm of float
  3. | Elastic_net of float * float
  4. | None

Types of regularisation functions.

val run : typ -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Stopping/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Stopping/index.html deleted file mode 100644 index f3ea43dd5..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Stopping/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Stopping (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Stopping)

Module Optimise.Stopping

Stopping module

type typ =
  1. | Const of float
  2. | Early of int * int
  3. | None

Types of stopping functions.

val run : typ -> float -> bool

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Utils/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Utils/index.html deleted file mode 100644 index 64c7d69e1..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/Utils/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Utils (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise.Utils)

Module Optimise.Utils

Utils module

val sample_num : Algodiff.t -> int

Return the total number of samples in the passed-in ndarray.

val draw_samples : Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t

draw_samples x y draws samples from both x (observations) and y (labels). The samples are drawn along axis 0, so x and y must agree along axis 0.

val get_chunk : Algodiff.t -> Algodiff.t -> int -> int -> Algodiff.t * Algodiff.t

get_chunk x y i c gets a contiguous chunk of c samples starting at position i from x (observations) and y (labels).

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/index.html deleted file mode 100644 index 79da077e5..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Optimise/index.html +++ /dev/null @@ -1,31 +0,0 @@ - -Optimise (owl-base.Owl_neural_graph_sig.Sig.Neuron.Optimise)

Module Neuron.Optimise

module Utils : sig ... end

Utils module

module Learning_Rate : sig ... end

Strategies for learning rate update

module Batch : sig ... end

Batch module

module Loss : sig ... end

Loss module

module Gradient : sig ... end

Gradient module

module Momentum : sig ... end

Momentum module

module Regularisation : sig ... end

Regularisation module

module Clipping : sig ... end

Clipping module

module Stopping : sig ... end

Stopping module

module Checkpoint : sig ... end

Checkpoint module

module Params : sig ... end

Params module

Core functions
val minimise_weight :
  ?state:Checkpoint.state ->
  Params.typ ->
  (Algodiff.t -> Algodiff.t -> Algodiff.t) ->
  Algodiff.t ->
  Algodiff.t ->
  Algodiff.t ->
  Checkpoint.state * Algodiff.t

This function minimises the weight w of the passed-in function f.

* f is a function f : w -> x -> y. * w is a row vector but y can have any shape.

val minimise_network :
  ?state:Checkpoint.state ->
  Params.typ ->
  (Algodiff.t -> Algodiff.t * Algodiff.t array array) ->
  (Algodiff.t -> Algodiff.t array array * Algodiff.t array array) ->
  (Algodiff.t array array -> unit) ->
  (string -> unit) ->
  Algodiff.t ->
  Algodiff.t ->
  Checkpoint.state

This function is specifically designed for minimising the weights in a neural network of graph structure. In Owl's earlier versions, the functions in the regression module were actually implemented using this function.

val minimise_fun :
  ?state:Checkpoint.state ->
  Params.typ ->
  (Algodiff.t -> Algodiff.t) ->
  Algodiff.t ->
  Checkpoint.state * Algodiff.t

This function minimises f : x -> y w.r.t x.

x is an ndarray, and y is a scalar value.
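
A minimal sketch of minimise_fun, assuming the concrete Owl.Optimise.S instantiation (the module path and parameter values are assumptions, not part of this signature):

module O  = Owl.Optimise.S
module AD = O.Algodiff

(* f x = || x - 3 ||^2, minimised at x = 3 *)
let f x = AD.Maths.(l2norm_sqr' (x - AD.pack_flt 3.))

let () =
  let params = O.Params.config ~learning_rate:(O.Learning_Rate.Const 0.1) 100. in
  let x0 = AD.Mat.zeros 1 5 in
  let _state, x_star = O.minimise_fun params f x0 in
  AD.Mat.print x_star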

val minimise_compiled_network :
  ?state:Checkpoint.state ->
  Params.typ ->
  (Algodiff.t -> Algodiff.t -> Algodiff.t) ->
  (unit -> unit) ->
  (string -> unit) ->
  Algodiff.t ->
  Algodiff.t ->
  Checkpoint.state

TODO

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Padding1D/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Padding1D/index.html deleted file mode 100644 index 6e64d8935..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Padding1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Padding1D (owl-base.Owl_neural_graph_sig.Sig.Neuron.Padding1D)

Module Neuron.Padding1D

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Padding2D/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Padding2D/index.html deleted file mode 100644 index e2cfae798..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Padding2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Padding2D (owl-base.Owl_neural_graph_sig.Sig.Neuron.Padding2D)

Module Neuron.Padding2D

type neuron_typ = {
  1. mutable padding : int array array;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : int array array -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Padding3D/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Padding3D/index.html deleted file mode 100644 index 6321d19e2..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Padding3D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Padding3D (owl-base.Owl_neural_graph_sig.Sig.Neuron.Padding3D)

Module Neuron.Padding3D

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Recurrent/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Recurrent/index.html deleted file mode 100644 index 8cf3cb0ab..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Recurrent/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -Recurrent (owl-base.Owl_neural_graph_sig.Sig.Neuron.Recurrent)

Module Neuron.Recurrent

type neuron_typ = {
  1. mutable whh : Optimise.Algodiff.t;
  2. mutable wxh : Optimise.Algodiff.t;
  3. mutable why : Optimise.Algodiff.t;
  4. mutable bh : Optimise.Algodiff.t;
  5. mutable by : Optimise.Algodiff.t;
  6. mutable h : Optimise.Algodiff.t;
  7. mutable hiddens : int;
  8. mutable act : Activation.typ;
  9. mutable init_typ : Init.typ;
  10. mutable in_shape : int array;
  11. mutable out_shape : int array;
}

Neuron type definition.

val create : ?time_steps:int -> ?inputs:int -> int -> int -> Activation.typ -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Reshape/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Reshape/index.html deleted file mode 100644 index f248169e9..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Reshape/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Reshape (owl-base.Owl_neural_graph_sig.Sig.Neuron.Reshape)

Module Neuron.Reshape

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> int array -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Slice/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Slice/index.html deleted file mode 100644 index a49b45efa..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/Slice/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Slice (owl-base.Owl_neural_graph_sig.Sig.Neuron.Slice)

Module Neuron.Slice

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
  3. mutable slice : int list list;
}

Neuron type definition.

val create : int list list -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/TransposeConv1D/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/TransposeConv1D/index.html deleted file mode 100644 index a58973534..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/TransposeConv1D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -TransposeConv1D (owl-base.Owl_neural_graph_sig.Sig.Neuron.TransposeConv1D)

Module Neuron.TransposeConv1D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/TransposeConv2D/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/TransposeConv2D/index.html deleted file mode 100644 index 05e888deb..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/TransposeConv2D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -TransposeConv2D (owl-base.Owl_neural_graph_sig.Sig.Neuron.TransposeConv2D)

Module Neuron.TransposeConv2D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/TransposeConv3D/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/TransposeConv3D/index.html deleted file mode 100644 index 878be20fd..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/TransposeConv3D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -TransposeConv3D (owl-base.Owl_neural_graph_sig.Sig.Neuron.TransposeConv3D)

Module Neuron.TransposeConv3D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/UpSampling1D/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/UpSampling1D/index.html deleted file mode 100644 index 36d3e741d..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/UpSampling1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -UpSampling1D (owl-base.Owl_neural_graph_sig.Sig.Neuron.UpSampling1D)

Module Neuron.UpSampling1D

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/UpSampling2D/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/UpSampling2D/index.html deleted file mode 100644 index e9d22c979..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/UpSampling2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -UpSampling2D (owl-base.Owl_neural_graph_sig.Sig.Neuron.UpSampling2D)

Module Neuron.UpSampling2D

type neuron_typ = {
  1. mutable size : int array;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : int array -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/UpSampling3D/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/UpSampling3D/index.html deleted file mode 100644 index 98a101628..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/UpSampling3D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -UpSampling3D (owl-base.Owl_neural_graph_sig.Sig.Neuron.UpSampling3D)

Module Neuron.UpSampling3D

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/index.html deleted file mode 100644 index ec09aeacc..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/Neuron/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Neuron (owl-base.Owl_neural_graph_sig.Sig.Neuron)

Module Sig.Neuron

Init neuron
module Init : sig ... end
Input neuron
module Input : sig ... end
Activation neuron
module Activation : sig ... end
Linear neuron
module Linear : sig ... end
LinearNoBias neuron
module LinearNoBias : sig ... end
Recurrent neuron
module Recurrent : sig ... end
LSTM neuron
module LSTM : sig ... end
GRU neuron
module GRU : sig ... end
Conv1D neuron
module Conv1D : sig ... end
Conv2D neuron
module Conv2D : sig ... end
Conv3D neuron
module Conv3D : sig ... end
DilatedConv1D neuron
module DilatedConv1D : sig ... end
DilatedConv2D neuron
module DilatedConv2D : sig ... end
DilatedConv3D neuron
module DilatedConv3D : sig ... end
TransposeConv1D neuron
module TransposeConv1D : sig ... end
TransposeConv2D neuron
module TransposeConv2D : sig ... end
TransposeConv3D neuron
module TransposeConv3D : sig ... end
FullyConnected neuron
module FullyConnected : sig ... end
MaxPool1D neuron
module MaxPool1D : sig ... end
MaxPool2D neuron
module MaxPool2D : sig ... end
AvgPool1D neuron
module AvgPool1D : sig ... end
AvgPool2D neuron
module AvgPool2D : sig ... end
GlobalMaxPool1D neuron
module GlobalMaxPool1D : sig ... end
GlobalMaxPool2D neuron
module GlobalMaxPool2D : sig ... end
GlobalAvgPool1D neuron
module GlobalAvgPool1D : sig ... end
GlobalAvgPool2D neuron
module GlobalAvgPool2D : sig ... end
UpSampling1D neuron
module UpSampling1D : sig ... end
UpSampling2D neuron
module UpSampling2D : sig ... end
UpSampling3D neuron
module UpSampling3D : sig ... end
Padding1D neuron
module Padding1D : sig ... end
Padding2D neuron
module Padding2D : sig ... end
Padding3D neuron
module Padding3D : sig ... end
Lambda neuron
module Lambda : sig ... end
LambdaArray neuron
module LambdaArray : sig ... end
Dropout neuron
module Dropout : sig ... end
Reshape neuron
module Reshape : sig ... end
Flatten neuron
module Flatten : sig ... end
Slice neuron
module Slice : sig ... end
Add neuron
module Add : sig ... end
Mul neuron
module Mul : sig ... end
Dot neuron
module Dot : sig ... end
Max neuron
module Max : sig ... end
Average neuron
module Average : sig ... end
Concatenate neuron
module Concatenate : sig ... end
Normalisation neuron
module Normalisation : sig ... end
GaussianNoise neuron
module GaussianNoise : sig ... end
GaussianDropout neuron
module GaussianDropout : sig ... end
AlphaDropout neuron
module AlphaDropout : sig ... end
Embedding neuron
module Embedding : sig ... end
Masking neuron
module Masking : sig ... end
Core functions
type neuron =
  1. | Input of Input.neuron_typ
  2. | Linear of Linear.neuron_typ
  3. | LinearNoBias of LinearNoBias.neuron_typ
  4. | Embedding of Embedding.neuron_typ
  5. | LSTM of LSTM.neuron_typ
  6. | GRU of GRU.neuron_typ
  7. | Recurrent of Recurrent.neuron_typ
  8. | Conv1D of Conv1D.neuron_typ
  9. | Conv2D of Conv2D.neuron_typ
  10. | Conv3D of Conv3D.neuron_typ
  11. | DilatedConv1D of DilatedConv1D.neuron_typ
  12. | DilatedConv2D of DilatedConv2D.neuron_typ
  13. | DilatedConv3D of DilatedConv3D.neuron_typ
  14. | TransposeConv1D of TransposeConv1D.neuron_typ
  15. | TransposeConv2D of TransposeConv2D.neuron_typ
  16. | TransposeConv3D of TransposeConv3D.neuron_typ
  17. | FullyConnected of FullyConnected.neuron_typ
  18. | MaxPool1D of MaxPool1D.neuron_typ
  19. | MaxPool2D of MaxPool2D.neuron_typ
  20. | AvgPool1D of AvgPool1D.neuron_typ
  21. | AvgPool2D of AvgPool2D.neuron_typ
  22. | GlobalMaxPool1D of GlobalMaxPool1D.neuron_typ
  23. | GlobalMaxPool2D of GlobalMaxPool2D.neuron_typ
  24. | GlobalAvgPool1D of GlobalAvgPool1D.neuron_typ
  25. | GlobalAvgPool2D of GlobalAvgPool2D.neuron_typ
  26. | UpSampling2D of UpSampling2D.neuron_typ
  27. | Padding2D of Padding2D.neuron_typ
  28. | Dropout of Dropout.neuron_typ
  29. | Reshape of Reshape.neuron_typ
  30. | Flatten of Flatten.neuron_typ
  31. | Slice of Slice.neuron_typ
  32. | Lambda of Lambda.neuron_typ
  33. | LambdaArray of LambdaArray.neuron_typ
  34. | Activation of Activation.neuron_typ
  35. | GaussianNoise of GaussianNoise.neuron_typ
  36. | GaussianDropout of GaussianDropout.neuron_typ
  37. | AlphaDropout of AlphaDropout.neuron_typ
  38. | Normalisation of Normalisation.neuron_typ
  39. | Add of Add.neuron_typ
  40. | Mul of Mul.neuron_typ
  41. | Dot of Dot.neuron_typ
  42. | Max of Max.neuron_typ
  43. | Average of Average.neuron_typ
  44. | Concatenate of Concatenate.neuron_typ
    (*

    Types of neuron.

    *)
val get_in_out_shape : neuron -> int array * int array

Get both input and output shapes of a neuron.

val get_in_shape : neuron -> int array

Get the input shape of a neuron.

val get_out_shape : neuron -> int array

Get the output shape of a neuron.

val connect : int array array -> neuron -> unit

Connect this neuron to others in a neural network.

val init : neuron -> unit

Initialise the neuron and its parameters.

val reset : neuron -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron -> Optimise.Algodiff.t array

Assemble all the trainable parameters in an array, used by Optimise module.

val mkpri : neuron -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron -> Optimise.Algodiff.t array -> unit

Update trainable parameters in a neuron, used by Optimise module.

val load_weights : neuron -> Optimise.Algodiff.t array -> unit

Load both trainable and non-trainable parameters into the neuron.

val save_weights : neuron -> Optimise.Algodiff.t array

Assemble both trainable and non-trainable parameters of the neuron.

val copy : neuron -> neuron

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : neuron -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_graph_sig/module-type-Sig/index.html b/owl-base/Owl_neural_graph_sig/module-type-Sig/index.html deleted file mode 100644 index d3fb0d129..000000000 --- a/owl-base/Owl_neural_graph_sig/module-type-Sig/index.html +++ /dev/null @@ -1,243 +0,0 @@ - -Sig (owl-base.Owl_neural_graph_sig.Sig)

Module type Owl_neural_graph_sig.Sig

Type definition
type node = {
  1. mutable name : string;
  2. mutable prev : node array;
  3. mutable next : node array;
  4. mutable neuron : Neuron.neuron;
  5. mutable output : Neuron.Optimise.Algodiff.t option;
  6. mutable network : network;
  7. mutable train : bool;
}
and network = {
  1. mutable nnid : string;
  2. mutable size : int;
  3. mutable roots : node array;
  4. mutable outputs : node array;
  5. mutable topo : node array;
}

Type definition of a node and a neural network.

Manipulate networks
val make_network : ?nnid:string -> int -> node array -> node array -> network

Create an empty neural network.

val make_node :
  ?name:string ->
  ?train:bool ->
  node array ->
  node array ->
  Neuron.neuron ->
  Neuron.Optimise.Algodiff.t option ->
  network ->
  node

Create a node in a neural network.

val get_roots : network -> node array

Get the roots of the neural network.

val get_outputs : network -> node array

Get the outputs of the neural network.

val get_node : network -> string -> node

Get a node in a network with the given name.

val get_network : ?name:string -> node -> network

Get the neural network that the given node is associated with.

val outputs : ?name:string -> node array -> network

Get the neural network associated with the given output nodes.

val get_network_name : network -> string

get_network_name n returns the name of the network n.

val set_network_name : network -> string -> unit

set_network_name n s sets the name of the network n to s.

val collect_output : node array -> Neuron.Optimise.Algodiff.t array

Collect the output values of given nodes.

val connect_pair : node -> node -> unit

Connect two nodes in a neural network.

val connect_to_parents : node array -> node -> unit

Connect a node to a list of parents.

val add_node : ?act_typ:Neuron.Activation.typ -> network -> node array -> node -> node

Add a node to the given network.

val input_shape : network -> int array

Get input shape of a network (without batch dimension), i.e. shape of input neuron.

val input_shapes : network -> int array array

Get input shapes of a network (without batch dimension), i.e. shape of input neurons.

Interface to optimisation engine
val init : network -> unit

Initialise the network.

val reset : network -> unit

Reset the network, i.e. all the parameters in the neurons.

val mktag : int -> network -> unit

Tag the neurons, used by Algodiff module.

val mkpar : network -> Neuron.Optimise.Algodiff.t array array

Collect the parameters of neurons, used by Optimise module.

val mkpri : network -> Neuron.Optimise.Algodiff.t array array

Collect the primal values of neurons, used by Optimise module.

val mkadj : network -> Neuron.Optimise.Algodiff.t array array

Collect the adjoint values of neurons, used by Optimise module.

val update : network -> Neuron.Optimise.Algodiff.t array array -> unit

Update the parameters of neurons, used by Optimise module.

Execute the computations in all the neurons in a network with the given input.

val run_inputs : Neuron.Optimise.Algodiff.t array -> network -> Neuron.Optimise.Algodiff.t array

Execute the computations in all the neurons in a network with the given inputs.

Run the forward pass of a network.

val forward_inputs :
  network ->
  Neuron.Optimise.Algodiff.t array ->
  Neuron.Optimise.Algodiff.t array * Neuron.Optimise.Algodiff.t array array

Run the forward pass of a network (multi-input/output version).

val backward :
  network ->
  Neuron.Optimise.Algodiff.t ->
  Neuron.Optimise.Algodiff.t array array * Neuron.Optimise.Algodiff.t array array

Run the backward pass of a network.

val copy : network -> network

Make a deep copy of the given network.

Make a deep copy of the given network, excluding the neurons marked with training = true.

val model_inputs : network -> Neuron.Optimise.Algodiff.A.arr array -> Neuron.Optimise.Algodiff.A.arr array

Make a deep copy of the given network, excluding the neurons marked with training = true.

Create Neurons
val input : ?name:string -> int array -> node

input shape creates an input node for input data. Note that if your network has multiple inputs, you should use inputs instead.

Arguments: * shape: shape of input data.

val inputs : ?names:string array -> int array array -> node array

inputs shapes creates an array of input nodes for input data.

Arguments: * shapes: array of shapes of input data.

val activation : ?name:string -> Neuron.Activation.typ -> node -> node

Applies an activation function to an output.

Arguments: * activation: name of activation function to use.

val linear :
  ?name:string ->
  ?init_typ:Neuron.Init.typ ->
  ?act_typ:Neuron.Activation.typ ->
  int ->
  node ->
  node

linear ?act_typ units node adds the regular densely-connected NN node to node.

Arguments: * units: Positive integer, dimensionality of the output space. * act_typ: Activation function to use.
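
Putting the node constructors together, a small multilayer perceptron in the style of Owl's examples (the concrete Owl.Neural.S.Graph modules are assumed here, not part of this signature):

open Owl
open Neural.S
open Neural.S.Graph

let network =
  input [| 784 |]
  |> linear 300 ~act_typ:Activation.Tanh
  |> linear 10 ~act_typ:Activation.(Softmax 1)
  |> get_network

let () = Graph.print network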

val linear_nobias :
  ?name:string ->
  ?init_typ:Neuron.Init.typ ->
  ?act_typ:Neuron.Activation.typ ->
  int ->
  node ->
  node

Similar to linear, but does not use the bias vector.

val embedding :
  ?name:string ->
  ?init_typ:Neuron.Init.typ ->
  ?act_typ:Neuron.Activation.typ ->
  int ->
  int ->
  node ->
  node

Create a node for embedding neuron.

val recurrent :
  ?name:string ->
  ?init_typ:Neuron.Init.typ ->
  act_typ:Neuron.Activation.typ ->
  int ->
  int ->
  node ->
  node

Create a node for recurrent neuron.

val lstm : ?name:string -> ?init_typ:Neuron.Init.typ -> int -> node -> node

lstm units node adds a LSTM node on previous node.

Arguments: * units: Positive integer, dimensionality of the output space.

val gru : ?name:string -> ?init_typ:Neuron.Init.typ -> int -> node -> node

gru units node adds a Gated Recurrent Unit node on previous node.

Arguments: * units: Positive integer, dimensionality of the output space.

val conv1d :
  ?name:string ->
  ?padding:Owl_types.padding ->
  ?init_typ:Neuron.Init.typ ->
  ?act_typ:Neuron.Activation.typ ->
  int array ->
  int array ->
  node ->
  node

conv1d kernel stride node adds a 1D convolution node (e.g. temporal convolution) on previous node.

Arguments: * kernel: int array consists of h, i, o. h specifies the dimension of the 1D convolution window. i and o are the dimensionalities of the input and output space. * stride: int array of 1 integer.

val conv2d :
  ?name:string ->
  ?padding:Owl_types.padding ->
  ?init_typ:Neuron.Init.typ ->
  ?act_typ:Neuron.Activation.typ ->
  int array ->
  int array ->
  node ->
  node

conv2d kernel stride node adds a 2D convolution node (e.g. spatial convolution over images) on previous node.

Arguments: * kernel: int array consists of w, h, i, o. w and h specify the width and height of the 2D convolution window. i and o are the dimensionality of the input and output space. * stride: int array of 2 integers.
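
For example, a sketch of a convolution block over 3-channel images (assuming Owl.Neural.S.Graph; SAME padding keeps the spatial size unchanged):

  open Owl
  open Neural.S
  open Neural.S.Graph

  (* 5x5 kernels, 3 input channels, 32 feature maps, stride 1x1 *)
  let conv_block x =
    conv2d ~padding:Owl_types.SAME [|5; 5; 3; 32|] [|1; 1|]
      ~act_typ:Activation.Relu x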

val conv3d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node

conv3d kernel stride node adds a 3D convolution node (e.g. spatial convolution over volumes) on previous node.

Arguments: * kernel: int array consists of w, h, d, i, o. w, h, and d specify the three dimensions of the 3D convolution window. i and o are the dimensionality of the input and output space. * stride: int array of 3 integers.

val dilated_conv1d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> int array -> node -> node

dilated_conv1d kernel stride rate node adds a 1D dilated convolution node (e.g. temporal convolution) on previous node.

Arguments: * kernel: int array consists of h, i, o. h specifies the dimension of the 1D convolution window. i and o are the dimensionalities of the input and output space. * stride: int array of 1 integer. * rate: int array of 1 integer.

val dilated_conv2d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> int array -> node -> node

dilated_conv2d kernel stride rate node adds a 2D dilated convolution node (e.g. spatial convolution over images) on previous node.

Arguments: * kernel: int array consists of w, h, i, o. w and h specify the width and height of the 2D convolution window. i and o are the dimensionality of the input and output space. * stride: int array of 2 integers. * rate: int array of 2 integers.

val dilated_conv3d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> int array -> node -> node

dilated_conv3d kernel stride rate node adds a 3D dilated convolution node (e.g. spatial convolution over volumes) on previous node.

Arguments: * kernel: int array consists of w, h, d, i, o. w, h, and d specify the three dimensions of the 3D convolution window. i and o are the dimensionality of the input and output space. * stride: int array of 3 integers. * rate: int array of 3 integers.

val transpose_conv1d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node

transpose_conv1d kernel stride node adds a 1D transpose convolution node (e.g. temporal convolution) on previous node.

Arguments: * kernel: int array consists of h, i, o. h specifies the dimension of the 1D convolution window. i and o are the dimensionalities of the input and output space. * stride: int array of 1 integer.

val transpose_conv2d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node

transpose_conv2d kernel stride node adds a 2D transpose convolution node on previous node.

Arguments: * kernel: int array consists of w, h, i, o. w and h specify the width and height of the 2D convolution window. i and o are the dimensionality of the input and output space. * stride: int array of 2 integers.

val transpose_conv3d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node

transpose_conv3d kernel stride node adds a 3D transpose convolution node (e.g. spatial convolution over volumes) on previous node.

Arguments: * kernel: int array consists of w, h, d, i, o. w, h, and d specify the three dimensions of the 3D convolution window. i and o are the dimensionality of the input and output space. * stride: int array of 3 integers.

val fully_connected : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> node -> node

fully_connected outputs node adds a fully connected node to node.

Arguments: * outputs: integer, the number of output units in the node.

val max_pool1d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node

max_pool1d ~padding ~act_typ pool_size stride node adds a max pooling operation for temporal data to node.

Arguments: * pool_size: Array of one integer, size of the max pooling windows. * stride: Array of one integer, factor by which to downscale.

val max_pool2d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node

max_pool2d ~padding ~act_typ pool_size stride node adds a max pooling operation for spatial data to node.

Arguments: * pool_size: Array of 2 integers, size of the max pooling windows. * stride: Array of 2 integers, factor by which to downscale.
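
For example (a sketch assuming Owl.Neural.S.Graph), non-overlapping 2x2 windows halve both spatial dimensions:

  open Owl
  open Neural.S
  open Neural.S.Graph

  (* downsample feature maps by a factor of 2 in width and height *)
  let pool x = max_pool2d ~padding:Owl_types.VALID [|2; 2|] [|2; 2|] x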

val avg_pool1d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node

avg_pool1d ~padding ~act_typ pool_size stride node adds an average pooling operation for temporal data to node.

Arguments: * pool_size: Array of one integer, size of the average pooling windows. * stride: Array of one integer, factor by which to downscale.

val avg_pool2d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node

avg_pool2d ~padding ~act_typ pool_size stride node adds an average pooling operation for spatial data to node.

Arguments: * pool_size: Array of 2 integers, size of the average pooling windows. * stride: Array of 2 integers, factor by which to downscale.

val global_max_pool1d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node

global_max_pool1d adds global max pooling operation for temporal data.

val global_max_pool2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node

global_max_pool2d adds a global max pooling operation for spatial data.

val global_avg_pool1d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node

global_avg_pool1d adds global average pooling operation for temporal data.

val global_avg_pool2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node

global_avg_pool2d adds a global average pooling operation for spatial data.

val upsampling2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> int array -> node -> node

upsampling2d ~act_typ size node adds an upsampling operation for spatial data to node.

Arguments: * size: array of two integers, namely the upsampling factors for columns and rows.

val padding2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> int array array -> node -> node

padding2d ~act_typ padding node adds rows and columns of zeros at the top, bottom, left and right side of an image tensor.

Arguments: * padding: array of 2 arrays of 2 integers, interpreted as [| [|top_pad; bottom_pad|]; [|left_pad; right_pad|] |].

val dropout : ?name:string -> float -> node -> node

dropout rate node applies Dropout to the input to prevent overfitting.

Arguments: * rate: float between 0 and 1. Fraction of the input units to drop.
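
For instance, dropping half of the hidden activations during training (a sketch assuming Owl.Neural.S.Graph and get_network):

  open Owl
  open Neural.S
  open Neural.S.Graph

  (* a small classifier with 50% dropout on the hidden layer *)
  let mlp_dropout () =
    input [|784|]
    |> linear 256 ~act_typ:Activation.Relu
    |> dropout 0.5
    |> linear 10 ~act_typ:Activation.(Softmax 1)
    |> get_network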

val gaussian_noise : ?name:string -> float -> node -> node

gaussian_noise stddev node applies additive zero-centered Gaussian noise.

Arguments: * stddev: float, standard deviation of the noise distribution.

val gaussian_dropout : ?name:string -> float -> node -> node

gaussian_dropout rate node applies multiplicative 1-centered Gaussian noise. Only active at training time.

Arguments: * rate: float, drop probability.

val alpha_dropout : ?name:string -> float -> node -> node

alpha_dropout rate node applies Alpha Dropout to the input node. Only active at training time.

Arguments: * rate: float, drop probability.

val normalisation : ?name:string -> ?axis:int -> ?training:bool -> ?decay:float -> ?mu:Neuron.Optimise.Algodiff.A.arr -> ?var:Neuron.Optimise.Algodiff.A.arr -> node -> node

normalisation axis node normalises the activations of the previous node at each batch.

Arguments: * axis: Integer, the axis that should be normalised (typically the features axis). Default value is 0.

val reshape : ?name:string -> int array -> node -> node

reshape target_shape node reshapes an output to a certain shape.

Arguments: * target_shape: target shape. Array of integers. Does not include the batch axis.

val flatten : ?name:string -> node -> node

flatten node flattens the input. Does not affect the batch size.
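
For example (a sketch assuming Owl.Neural.S.Graph), flattening convolutional feature maps before a classifier:

  open Owl
  open Neural.S
  open Neural.S.Graph

  (* collapse the (h, w, c) feature maps of x into a vector, then classify *)
  let head x = x |> flatten |> linear 10 ~act_typ:Activation.(Softmax 1)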

val slice : ?name:string -> int list list -> node -> node

slice node slices the input. Does not affect the batch size.

val lambda : ?name:string -> ?act_typ:Neuron.Activation.typ -> ?out_shape:int array -> (Neuron.Optimise.Algodiff.t -> Neuron.Optimise.Algodiff.t) -> node -> node

lambda ?target_shape func node wraps an arbitrary expression as a Node object.

Arguments: * func: The function to be evaluated. Takes input tensor as first argument. * target_shape: the shape of the tensor returned by func; set to the same as input shape if not specified.
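
A common use is scaling raw inputs before the first layer. The sketch below assumes Owl.Neural.S.Graph and opens its Algodiff module for Maths and the scalar constructor F; the output shape equals the input shape, so ?out_shape is omitted:

  open Owl
  open Neural.S
  open Neural.S.Graph
  open Neural.S.Algodiff

  (* map pixel values from [0, 255] into [0, 1] *)
  let scaled = input [|28; 28; 1|] |> lambda (fun x -> Maths.(x / F 255.))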

val lambda_array : ?name:string -> ?act_typ:Neuron.Activation.typ -> int array -> (Neuron.Optimise.Algodiff.t array -> Neuron.Optimise.Algodiff.t) -> node array -> node

lambda_array target_shape func node wraps an arbitrary expression as a Node object.

Arguments: * target_shape: the shape of the tensor returned by func. * func: The function to be evaluated. Takes input tensor array as first argument.

val add : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node

Node that adds a list of inputs.

It takes as input an array of nodes, all of the same shape, and returns a single node (also of the same shape).

val mul : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node

Node that multiplies (element-wise) a list of inputs.

It takes as input an array of nodes, all of the same shape, and returns a single node (also of the same shape).

val dot : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node

Node that computes a dot product between samples in two nodes.

val max : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node

Node that computes the element-wise maximum of a list of inputs.

val average : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node

Node that averages a list of inputs.

It takes as input an array of nodes, all of the same shape, and returns a single node (also of the same shape).

val concatenate : ?name:string -> ?act_typ:Neuron.Activation.typ -> int -> node array -> node

concatenate axis nodes concatenates an array of nodes and returns a single node.

Arguments: * axis: Axis along which to concatenate.
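
For instance, two parallel branches over the same input joined along the feature axis (a sketch assuming Owl.Neural.S.Graph; axis 0 is taken to be the batch axis, so axis 1 is the feature axis):

  open Owl
  open Neural.S
  open Neural.S.Graph

  (* fork the input into two linear branches and concatenate their outputs *)
  let fork x =
    let a = linear 32 ~act_typ:Activation.Relu x in
    let b = linear 32 ~act_typ:Activation.Tanh x in
    concatenate 1 [|a; b|]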

Helper functions
val to_string : network -> string

Convert a neural network to its string representation.

val pp_network : Stdlib.Format.formatter -> network -> unit

Pretty-printing function for a neural network.

val print : network -> unit

Print the string representation of a neural network to the standard output.

val save : ?unsafe:bool -> network -> string -> unit

Serialise a network and save it to a file with the given name. Set the unsafe flag to true if the network contains a Lambda layer.

val load : string -> network

Load the neural network from a file with the given name.
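
For example, a round trip through the file system (a sketch assuming Owl.Neural.S.Graph and get_network; the file name is illustrative):

  open Owl
  open Neural.S
  open Neural.S.Graph

  let () =
    let nn = input [|784|] |> linear 10 ~act_typ:Activation.(Softmax 1) |> get_network in
    save nn "mlp.network";          (* ?unsafe defaults to false *)
    let nn' = load "mlp.network" in
    print nn'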

val save_weights : network -> string -> unit

Save all the weights in a neural network to a file. The weights and the name of their associated neurons are saved as key-value pairs in a hash table.

val load_weights : network -> string -> unit

Load the weights from a file of the given name. Note that the weights and the name of their associated neurons are saved as key-value pairs in a hash table.
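
For example (a sketch assuming Owl.Neural.S.Graph and get_network; the file name is illustrative), weights can be restored into a network with the same neuron names:

  open Owl
  open Neural.S
  open Neural.S.Graph

  let () =
    let nn = input [|784|] |> linear 10 ~act_typ:Activation.(Softmax 1) |> get_network in
    save_weights nn "mlp.weights";
    (* ... later, after rebuilding a network with the same topology ... *)
    load_weights nn "mlp.weights"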

val make_subnetwork : ?copy:bool -> ?make_inputs:string array -> network -> string array -> network

make_subnetwork ?copy ?make_inputs nn output_names constructs the subnetwork of nodes on which output_names depend, replacing the nodes whose names appear in make_inputs with input nodes.

Arguments: * copy: Whether to copy or reference the original node weights. Defaults to true. * make_inputs: Names of nodes to use as inputs to the subnetwork. Defaults to [||], which uses the original inputs. * nn: The neural network from which the subnetwork is constructed. * output_names: Names of nodes to use as outputs.
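
For instance (a sketch assuming Owl.Neural.S.Graph; the node names "x", "fc1" and "out" are illustrative), extracting everything needed to compute the node named "fc1" while referencing the original weights:

  open Owl
  open Neural.S
  open Neural.S.Graph

  let () =
    let nn =
      input ~name:"x" [|784|]
      |> linear ~name:"fc1" 128 ~act_typ:Activation.Relu
      |> linear ~name:"out" 10 ~act_typ:Activation.(Softmax 1)
      |> get_network
    in
    (* subnetwork from the original inputs up to "fc1" *)
    let sub = make_subnetwork ~copy:false nn [|"fc1"|] in
    print sub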

Train Networks

Generic function for training a neural network.

Train a neural network with various configurations.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/.dummy b/owl-base/Owl_neural_neuron/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_neural_neuron/Make/Activation/index.html b/owl-base/Owl_neural_neuron/Make/Activation/index.html deleted file mode 100644 index 62f2bd3f6..000000000 --- a/owl-base/Owl_neural_neuron/Make/Activation/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Activation (owl-base.Owl_neural_neuron.Make.Activation)

Module Make.Activation

type typ =
  1. | Elu
  2. | Relu
  3. | Sigmoid
  4. | HardSigmoid
  5. | Softmax of int
  6. | Softplus
  7. | Softsign
  8. | Tanh
  9. | Relu6
  10. | LeakyRelu of float
  11. | TRelu of float
  12. | Custom of Optimise.Algodiff.t -> Optimise.Algodiff.t
  13. | None
type neuron_typ = {
  1. mutable activation : typ;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val run_activation : Optimise.Algodiff.t -> typ -> Optimise.Algodiff.t
val copy : neuron_typ -> neuron_typ
val activation_to_string : typ -> string
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/Add/index.html b/owl-base/Owl_neural_neuron/Make/Add/index.html deleted file mode 100644 index b56e79ad5..000000000 --- a/owl-base/Owl_neural_neuron/Make/Add/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Add (owl-base.Owl_neural_neuron.Make.Add)

Module Make.Add

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/AlphaDropout/index.html b/owl-base/Owl_neural_neuron/Make/AlphaDropout/index.html deleted file mode 100644 index dc47e13e2..000000000 --- a/owl-base/Owl_neural_neuron/Make/AlphaDropout/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -AlphaDropout (owl-base.Owl_neural_neuron.Make.AlphaDropout)

Module Make.AlphaDropout

type neuron_typ = {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : float -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/Average/index.html b/owl-base/Owl_neural_neuron/Make/Average/index.html deleted file mode 100644 index 979a4ee61..000000000 --- a/owl-base/Owl_neural_neuron/Make/Average/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Average (owl-base.Owl_neural_neuron.Make.Average)

Module Make.Average

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/AvgPool1D/index.html b/owl-base/Owl_neural_neuron/Make/AvgPool1D/index.html deleted file mode 100644 index 9aca1922f..000000000 --- a/owl-base/Owl_neural_neuron/Make/AvgPool1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -AvgPool1D (owl-base.Owl_neural_neuron.Make.AvgPool1D)

Module Make.AvgPool1D

type neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : Owl_types.padding -> int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/AvgPool2D/index.html b/owl-base/Owl_neural_neuron/Make/AvgPool2D/index.html deleted file mode 100644 index bb8bf5ee1..000000000 --- a/owl-base/Owl_neural_neuron/Make/AvgPool2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -AvgPool2D (owl-base.Owl_neural_neuron.Make.AvgPool2D)

Module Make.AvgPool2D

type neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : Owl_types.padding -> int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/Concatenate/index.html b/owl-base/Owl_neural_neuron/Make/Concatenate/index.html deleted file mode 100644 index 6a390594b..000000000 --- a/owl-base/Owl_neural_neuron/Make/Concatenate/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Concatenate (owl-base.Owl_neural_neuron.Make.Concatenate)

Module Make.Concatenate

type neuron_typ = {
  1. mutable axis : int;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : int -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/Conv1D/index.html b/owl-base/Owl_neural_neuron/Make/Conv1D/index.html deleted file mode 100644 index f12dfcd94..000000000 --- a/owl-base/Owl_neural_neuron/Make/Conv1D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -Conv1D (owl-base.Owl_neural_neuron.Make.Conv1D)

Module Make.Conv1D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/Conv2D/index.html b/owl-base/Owl_neural_neuron/Make/Conv2D/index.html deleted file mode 100644 index 6dbef2df9..000000000 --- a/owl-base/Owl_neural_neuron/Make/Conv2D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -Conv2D (owl-base.Owl_neural_neuron.Make.Conv2D)

Module Make.Conv2D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/Conv3D/index.html b/owl-base/Owl_neural_neuron/Make/Conv3D/index.html deleted file mode 100644 index cf44c6cf4..000000000 --- a/owl-base/Owl_neural_neuron/Make/Conv3D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -Conv3D (owl-base.Owl_neural_neuron.Make.Conv3D)

Module Make.Conv3D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/DilatedConv1D/index.html b/owl-base/Owl_neural_neuron/Make/DilatedConv1D/index.html deleted file mode 100644 index e3bf5d7d3..000000000 --- a/owl-base/Owl_neural_neuron/Make/DilatedConv1D/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -DilatedConv1D (owl-base.Owl_neural_neuron.Make.DilatedConv1D)

Module Make.DilatedConv1D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/DilatedConv2D/index.html b/owl-base/Owl_neural_neuron/Make/DilatedConv2D/index.html deleted file mode 100644 index d31462ec3..000000000 --- a/owl-base/Owl_neural_neuron/Make/DilatedConv2D/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -DilatedConv2D (owl-base.Owl_neural_neuron.Make.DilatedConv2D)

Module Make.DilatedConv2D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/DilatedConv3D/index.html b/owl-base/Owl_neural_neuron/Make/DilatedConv3D/index.html deleted file mode 100644 index 725fca665..000000000 --- a/owl-base/Owl_neural_neuron/Make/DilatedConv3D/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -DilatedConv3D (owl-base.Owl_neural_neuron.Make.DilatedConv3D)

Module Make.DilatedConv3D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/Dot/index.html b/owl-base/Owl_neural_neuron/Make/Dot/index.html deleted file mode 100644 index 953b956bc..000000000 --- a/owl-base/Owl_neural_neuron/Make/Dot/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Dot (owl-base.Owl_neural_neuron.Make.Dot)

Module Make.Dot

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/Dropout/index.html b/owl-base/Owl_neural_neuron/Make/Dropout/index.html deleted file mode 100644 index 5988b2092..000000000 --- a/owl-base/Owl_neural_neuron/Make/Dropout/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Dropout (owl-base.Owl_neural_neuron.Make.Dropout)

Module Make.Dropout

type neuron_typ = {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : float -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/Embedding/index.html b/owl-base/Owl_neural_neuron/Make/Embedding/index.html deleted file mode 100644 index 066e95a9f..000000000 --- a/owl-base/Owl_neural_neuron/Make/Embedding/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Embedding (owl-base.Owl_neural_neuron.Make.Embedding)

Module Make.Embedding

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable init_typ : Init.typ;
  3. mutable in_dim : int;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : ?inputs:int -> int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/Flatten/index.html b/owl-base/Owl_neural_neuron/Make/Flatten/index.html deleted file mode 100644 index d29b1e099..000000000 --- a/owl-base/Owl_neural_neuron/Make/Flatten/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Flatten (owl-base.Owl_neural_neuron.Make.Flatten)

Module Make.Flatten

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/FullyConnected/index.html b/owl-base/Owl_neural_neuron/Make/FullyConnected/index.html deleted file mode 100644 index bf614ff81..000000000 --- a/owl-base/Owl_neural_neuron/Make/FullyConnected/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -FullyConnected (owl-base.Owl_neural_neuron.Make.FullyConnected)

Module Make.FullyConnected

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable init_typ : Init.typ;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/GRU/index.html b/owl-base/Owl_neural_neuron/Make/GRU/index.html deleted file mode 100644 index 5991893f8..000000000 --- a/owl-base/Owl_neural_neuron/Make/GRU/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GRU (owl-base.Owl_neural_neuron.Make.GRU)

Module Make.GRU

type neuron_typ = {
  1. mutable wxz : Optimise.Algodiff.t;
  2. mutable whz : Optimise.Algodiff.t;
  3. mutable wxr : Optimise.Algodiff.t;
  4. mutable whr : Optimise.Algodiff.t;
  5. mutable wxh : Optimise.Algodiff.t;
  6. mutable whh : Optimise.Algodiff.t;
  7. mutable bz : Optimise.Algodiff.t;
  8. mutable br : Optimise.Algodiff.t;
  9. mutable bh : Optimise.Algodiff.t;
  10. mutable h : Optimise.Algodiff.t;
  11. mutable init_typ : Init.typ;
  12. mutable in_shape : int array;
  13. mutable out_shape : int array;
}
val create : ?time_steps:int -> ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/GaussianDropout/index.html b/owl-base/Owl_neural_neuron/Make/GaussianDropout/index.html deleted file mode 100644 index 728513fc9..000000000 --- a/owl-base/Owl_neural_neuron/Make/GaussianDropout/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GaussianDropout (owl-base.Owl_neural_neuron.Make.GaussianDropout)

Module Make.GaussianDropout

type neuron_typ = {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : float -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/GaussianNoise/index.html b/owl-base/Owl_neural_neuron/Make/GaussianNoise/index.html deleted file mode 100644 index 01a72f180..000000000 --- a/owl-base/Owl_neural_neuron/Make/GaussianNoise/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GaussianNoise (owl-base.Owl_neural_neuron.Make.GaussianNoise)

Module Make.GaussianNoise

type neuron_typ = {
  1. mutable sigma : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : float -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/GlobalAvgPool1D/index.html b/owl-base/Owl_neural_neuron/Make/GlobalAvgPool1D/index.html deleted file mode 100644 index 05147fef7..000000000 --- a/owl-base/Owl_neural_neuron/Make/GlobalAvgPool1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GlobalAvgPool1D (owl-base.Owl_neural_neuron.Make.GlobalAvgPool1D)

Module Make.GlobalAvgPool1D

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/GlobalAvgPool2D/index.html b/owl-base/Owl_neural_neuron/Make/GlobalAvgPool2D/index.html deleted file mode 100644 index 091f2518f..000000000 --- a/owl-base/Owl_neural_neuron/Make/GlobalAvgPool2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GlobalAvgPool2D (owl-base.Owl_neural_neuron.Make.GlobalAvgPool2D)

Module Make.GlobalAvgPool2D

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/GlobalMaxPool1D/index.html b/owl-base/Owl_neural_neuron/Make/GlobalMaxPool1D/index.html deleted file mode 100644 index c6a58230f..000000000 --- a/owl-base/Owl_neural_neuron/Make/GlobalMaxPool1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GlobalMaxPool1D (owl-base.Owl_neural_neuron.Make.GlobalMaxPool1D)

Module Make.GlobalMaxPool1D

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/GlobalMaxPool2D/index.html b/owl-base/Owl_neural_neuron/Make/GlobalMaxPool2D/index.html deleted file mode 100644 index 0d3d42bd3..000000000 --- a/owl-base/Owl_neural_neuron/Make/GlobalMaxPool2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GlobalMaxPool2D (owl-base.Owl_neural_neuron.Make.GlobalMaxPool2D)

Module Make.GlobalMaxPool2D

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/Init/index.html b/owl-base/Owl_neural_neuron/Make/Init/index.html deleted file mode 100644 index 6ffcf7061..000000000 --- a/owl-base/Owl_neural_neuron/Make/Init/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Init (owl-base.Owl_neural_neuron.Make.Init)

Module Make.Init

type typ =
  1. | Uniform of float * float
  2. | Gaussian of float * float
  3. | Standard
  4. | Tanh
  5. | GlorotNormal
  6. | GlorotUniform
  7. | LecunNormal
  8. | HeNormal
  9. | Custom of int array -> Optimise.Algodiff.t
val calc_fans : int array -> float * float
val run : typ -> int array -> Optimise.Algodiff.t -> Optimise.Algodiff.t
val to_string : typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/Input/index.html b/owl-base/Owl_neural_neuron/Make/Input/index.html deleted file mode 100644 index aa30baf14..000000000 --- a/owl-base/Owl_neural_neuron/Make/Input/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Input (owl-base.Owl_neural_neuron.Make.Input)

Module Make.Input

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : int array -> neuron_typ
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/LSTM/index.html b/owl-base/Owl_neural_neuron/Make/LSTM/index.html deleted file mode 100644 index cdb6a04d4..000000000 --- a/owl-base/Owl_neural_neuron/Make/LSTM/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -LSTM (owl-base.Owl_neural_neuron.Make.LSTM)

Module Make.LSTM

type neuron_typ = {
  1. mutable wxi : Optimise.Algodiff.t;
  2. mutable whi : Optimise.Algodiff.t;
  3. mutable wxc : Optimise.Algodiff.t;
  4. mutable whc : Optimise.Algodiff.t;
  5. mutable wxf : Optimise.Algodiff.t;
  6. mutable whf : Optimise.Algodiff.t;
  7. mutable wxo : Optimise.Algodiff.t;
  8. mutable who : Optimise.Algodiff.t;
  9. mutable bi : Optimise.Algodiff.t;
  10. mutable bc : Optimise.Algodiff.t;
  11. mutable bf : Optimise.Algodiff.t;
  12. mutable bo : Optimise.Algodiff.t;
  13. mutable c : Optimise.Algodiff.t;
  14. mutable h : Optimise.Algodiff.t;
  15. mutable init_typ : Init.typ;
  16. mutable in_shape : int array;
  17. mutable out_shape : int array;
}
val create : ?time_steps:int -> ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/Lambda/index.html b/owl-base/Owl_neural_neuron/Make/Lambda/index.html deleted file mode 100644 index cf8b7ce2a..000000000 --- a/owl-base/Owl_neural_neuron/Make/Lambda/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Lambda (owl-base.Owl_neural_neuron.Make.Lambda)

Module Make.Lambda

type neuron_typ = {
  1. mutable lambda : Optimise.Algodiff.t -> Optimise.Algodiff.t;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : ?out_shape:int array -> (Optimise.Algodiff.t -> Optimise.Algodiff.t) -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/LambdaArray/index.html b/owl-base/Owl_neural_neuron/Make/LambdaArray/index.html deleted file mode 100644 index 68357cacd..000000000 --- a/owl-base/Owl_neural_neuron/Make/LambdaArray/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -LambdaArray (owl-base.Owl_neural_neuron.Make.LambdaArray)

Module Make.LambdaArray

type neuron_typ = {
  1. mutable lambda : Optimise.Algodiff.t array -> Optimise.Algodiff.t;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : int array -> (Optimise.Algodiff.t array -> Optimise.Algodiff.t) -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/Linear/index.html b/owl-base/Owl_neural_neuron/Make/Linear/index.html deleted file mode 100644 index 7d2dad15e..000000000 --- a/owl-base/Owl_neural_neuron/Make/Linear/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Linear (owl-base.Owl_neural_neuron.Make.Linear)

Module Make.Linear

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable init_typ : Init.typ;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/LinearNoBias/index.html b/owl-base/Owl_neural_neuron/Make/LinearNoBias/index.html deleted file mode 100644 index b1efbc902..000000000 --- a/owl-base/Owl_neural_neuron/Make/LinearNoBias/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -LinearNoBias (owl-base.Owl_neural_neuron.Make.LinearNoBias)

Module Make.LinearNoBias

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable init_typ : Init.typ;
  3. mutable in_shape : int array;
  4. mutable out_shape : int array;
}
val create : ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/Masking/index.html b/owl-base/Owl_neural_neuron/Make/Masking/index.html deleted file mode 100644 index dc7b6116b..000000000 --- a/owl-base/Owl_neural_neuron/Make/Masking/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Masking (owl-base.Owl_neural_neuron.Make.Masking)

Module Make.Masking

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/Max/index.html b/owl-base/Owl_neural_neuron/Make/Max/index.html deleted file mode 100644 index b70bba852..000000000 --- a/owl-base/Owl_neural_neuron/Make/Max/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Max (owl-base.Owl_neural_neuron.Make.Max)

Module Make.Max

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/MaxPool1D/index.html b/owl-base/Owl_neural_neuron/Make/MaxPool1D/index.html deleted file mode 100644 index 779d6aac6..000000000 --- a/owl-base/Owl_neural_neuron/Make/MaxPool1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -MaxPool1D (owl-base.Owl_neural_neuron.Make.MaxPool1D)

Module Make.MaxPool1D

type neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : Owl_types.padding -> int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/MaxPool2D/index.html b/owl-base/Owl_neural_neuron/Make/MaxPool2D/index.html deleted file mode 100644 index d1effea3e..000000000 --- a/owl-base/Owl_neural_neuron/Make/MaxPool2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -MaxPool2D (owl-base.Owl_neural_neuron.Make.MaxPool2D)

Module Make.MaxPool2D

type neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : Owl_types.padding -> int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/Mul/index.html b/owl-base/Owl_neural_neuron/Make/Mul/index.html deleted file mode 100644 index cda90601e..000000000 --- a/owl-base/Owl_neural_neuron/Make/Mul/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mul (owl-base.Owl_neural_neuron.Make.Mul)

Module Make.Mul

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/Normalisation/index.html b/owl-base/Owl_neural_neuron/Make/Normalisation/index.html deleted file mode 100644 index 7e2add471..000000000 --- a/owl-base/Owl_neural_neuron/Make/Normalisation/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -Normalisation (owl-base.Owl_neural_neuron.Make.Normalisation)

Module Make.Normalisation

type neuron_typ = {
  1. mutable axis : int;
  2. mutable beta : Optimise.Algodiff.t;
  3. mutable gamma : Optimise.Algodiff.t;
  4. mutable mu : Optimise.Algodiff.t;
  5. mutable var : Optimise.Algodiff.t;
  6. mutable decay : Optimise.Algodiff.t;
  7. mutable training : bool;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}
val create : ?training:bool -> ?decay:float -> ?mu:Optimise.Algodiff.A.arr -> ?var:Optimise.Algodiff.A.arr -> int -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val load_weights : neuron_typ -> Optimise.Algodiff.t array -> unit
val save_weights : neuron_typ -> Optimise.Algodiff.t array
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/Padding1D/index.html b/owl-base/Owl_neural_neuron/Make/Padding1D/index.html deleted file mode 100644 index d8f8a81d9..000000000 --- a/owl-base/Owl_neural_neuron/Make/Padding1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Padding1D (owl-base.Owl_neural_neuron.Make.Padding1D)

Module Make.Padding1D

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/Padding2D/index.html b/owl-base/Owl_neural_neuron/Make/Padding2D/index.html deleted file mode 100644 index a55b6653a..000000000 --- a/owl-base/Owl_neural_neuron/Make/Padding2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Padding2D (owl-base.Owl_neural_neuron.Make.Padding2D)

Module Make.Padding2D

type neuron_typ = {
  1. mutable padding : int array array;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : int array array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/Padding3D/index.html b/owl-base/Owl_neural_neuron/Make/Padding3D/index.html deleted file mode 100644 index 48910626c..000000000 --- a/owl-base/Owl_neural_neuron/Make/Padding3D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Padding3D (owl-base.Owl_neural_neuron.Make.Padding3D)

Module Make.Padding3D

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/Recurrent/index.html b/owl-base/Owl_neural_neuron/Make/Recurrent/index.html deleted file mode 100644 index 55c3bc827..000000000 --- a/owl-base/Owl_neural_neuron/Make/Recurrent/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -Recurrent (owl-base.Owl_neural_neuron.Make.Recurrent)

Module Make.Recurrent

type neuron_typ = {
  1. mutable whh : Optimise.Algodiff.t;
  2. mutable wxh : Optimise.Algodiff.t;
  3. mutable why : Optimise.Algodiff.t;
  4. mutable bh : Optimise.Algodiff.t;
  5. mutable by : Optimise.Algodiff.t;
  6. mutable h : Optimise.Algodiff.t;
  7. mutable hiddens : int;
  8. mutable act : Activation.typ;
  9. mutable init_typ : Init.typ;
  10. mutable in_shape : int array;
  11. mutable out_shape : int array;
}
val create : ?time_steps:int -> ?inputs:int -> int -> int -> Activation.typ -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/Reshape/index.html b/owl-base/Owl_neural_neuron/Make/Reshape/index.html deleted file mode 100644 index 5cfba87e6..000000000 --- a/owl-base/Owl_neural_neuron/Make/Reshape/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Reshape (owl-base.Owl_neural_neuron.Make.Reshape)

Module Make.Reshape

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : ?inputs:int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/Slice/index.html b/owl-base/Owl_neural_neuron/Make/Slice/index.html deleted file mode 100644 index 364bd326e..000000000 --- a/owl-base/Owl_neural_neuron/Make/Slice/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Slice (owl-base.Owl_neural_neuron.Make.Slice)

Module Make.Slice

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
  3. mutable slice : int list list;
}
val create : int list list -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/TransposeConv1D/index.html b/owl-base/Owl_neural_neuron/Make/TransposeConv1D/index.html deleted file mode 100644 index f26f50a0f..000000000 --- a/owl-base/Owl_neural_neuron/Make/TransposeConv1D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -TransposeConv1D (owl-base.Owl_neural_neuron.Make.TransposeConv1D)

Module Make.TransposeConv1D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/TransposeConv2D/index.html b/owl-base/Owl_neural_neuron/Make/TransposeConv2D/index.html deleted file mode 100644 index da2a7ee7c..000000000 --- a/owl-base/Owl_neural_neuron/Make/TransposeConv2D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -TransposeConv2D (owl-base.Owl_neural_neuron.Make.TransposeConv2D)

Module Make.TransposeConv2D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/TransposeConv3D/index.html b/owl-base/Owl_neural_neuron/Make/TransposeConv3D/index.html deleted file mode 100644 index 99ea523a3..000000000 --- a/owl-base/Owl_neural_neuron/Make/TransposeConv3D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -TransposeConv3D (owl-base.Owl_neural_neuron.Make.TransposeConv3D)

Module Make.TransposeConv3D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/UpSampling1D/index.html b/owl-base/Owl_neural_neuron/Make/UpSampling1D/index.html deleted file mode 100644 index d7df3073f..000000000 --- a/owl-base/Owl_neural_neuron/Make/UpSampling1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -UpSampling1D (owl-base.Owl_neural_neuron.Make.UpSampling1D)

Module Make.UpSampling1D

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/UpSampling2D/index.html b/owl-base/Owl_neural_neuron/Make/UpSampling2D/index.html deleted file mode 100644 index 174f111a1..000000000 --- a/owl-base/Owl_neural_neuron/Make/UpSampling2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -UpSampling2D (owl-base.Owl_neural_neuron.Make.UpSampling2D)

Module Make.UpSampling2D

type neuron_typ = {
  1. mutable size : int array;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/UpSampling3D/index.html b/owl-base/Owl_neural_neuron/Make/UpSampling3D/index.html deleted file mode 100644 index 6ad06a771..000000000 --- a/owl-base/Owl_neural_neuron/Make/UpSampling3D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -UpSampling3D (owl-base.Owl_neural_neuron.Make.UpSampling3D)

Module Make.UpSampling3D

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/A/Linalg/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/A/Linalg/index.html deleted file mode 100644 index b7ca0828b..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_neural_neuron.Make.Optimise.Algodiff.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : - ?solver:[ `default | `bilinear | `direct ] -> - arr -> - arr -> - arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/A/Mat/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/A/Mat/index.html deleted file mode 100644 index 0e9a9162d..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_neural_neuron.Make.Optimise.Algodiff.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/A/Scalar/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/A/Scalar/index.html deleted file mode 100644 index 9f9d45789..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_neural_neuron.Make.Optimise.Algodiff.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/A/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/A/index.html deleted file mode 100644 index 05a09dfb1..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl-base.Owl_neural_neuron.Make.Optimise.Algodiff.A)

Module Algodiff.A

include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : - ?max_row:int -> - ?max_col:int -> - ?header:bool -> - ?fmt:(elt -> string) -> - arr -> - unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val dilated_conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val dilated_conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val dilated_conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val transpose_conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val transpose_conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val transpose_conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val max_pool1d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val max_pool2d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val max_pool3d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool1d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool2d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool3d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv1d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv2d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv2d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv3d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv3d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val max_pool2d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val max_pool3d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool1d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool2d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool3d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Arr/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Arr/index.html deleted file mode 100644 index 88f3e1c2f..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Arr/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Arr (owl-base.Owl_neural_neuron.Make.Optimise.Algodiff.Arr)

Module Algodiff.Arr

val empty : int array -> t
val zeros : int array -> t
val ones : int array -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int array -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int array -> t
val shape : t -> int array
val numel : t -> int
val reset : t -> unit
val reshape : t -> int array -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Builder/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Builder/index.html deleted file mode 100644 index 2a75d4ada..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Builder/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Builder (owl-base.Owl_neural_neuron.Make.Optimise.Algodiff.Builder)

Module Algodiff.Builder

Ops Builder
module type Siso = sig ... end
val build_siso : (module Siso) -> t -> t

build single input single output operations

module type Sipo = sig ... end
val build_sipo : (module Sipo) -> t -> t * t

build single input pair outputs operations

module type Sito = sig ... end
val build_sito : (module Sito) -> t -> t * t * t

build single input triple outputs operations

module type Siao = sig ... end
val build_siao : (module Siao) -> t -> t array

build single input array output operations

module type Piso = sig ... end
val build_piso : (module Piso) -> t -> t -> t

build pair inputs single output operations

module type Aiso = sig ... end
val build_aiso : (module Aiso) -> t array -> t

build array input single output operations

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Builder/module-type-Aiso/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Builder/module-type-Aiso/index.html deleted file mode 100644 index ab0510c01..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Builder/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl-base.Owl_neural_neuron.Make.Optimise.Algodiff.Builder.Aiso)

Module type Builder.Aiso

val label : string
val ff : t array -> t
val df : int list -> t -> t array -> t array -> t
val dr : int list -> t array -> t -> t Stdlib.ref -> t list
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Builder/module-type-Piso/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Builder/module-type-Piso/index.html deleted file mode 100644 index 79a4966ea..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Builder/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl-base.Owl_neural_neuron.Make.Optimise.Algodiff.Builder.Piso)

Module type Builder.Piso

val label : string
val ff_aa : A.elt -> A.elt -> t
val ff_ab : A.elt -> A.arr -> t
val ff_ba : A.arr -> A.elt -> t
val ff_bb : A.arr -> A.arr -> t
val df_da : t -> t -> t -> t -> t
val df_db : t -> t -> t -> t -> t
val df_dab : t -> t -> t -> t -> t -> t
val dr_ab : t -> t -> t -> t Stdlib.ref -> t * t
val dr_a : t -> t -> t -> t Stdlib.ref -> t
val dr_b : t -> t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Builder/module-type-Siao/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Builder/module-type-Siao/index.html deleted file mode 100644 index 225ab0e17..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Builder/module-type-Siao/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siao (owl-base.Owl_neural_neuron.Make.Optimise.Algodiff.Builder.Siao)

Module type Builder.Siao

val label : string
val ff_f : A.elt -> t array
val ff_arr : A.arr -> t array
val df : t array -> t -> t -> t array
val dr : t -> t -> t Stdlib.ref array -> t Stdlib.ref array -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Builder/module-type-Sipo/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Builder/module-type-Sipo/index.html deleted file mode 100644 index 2e652368b..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Builder/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl-base.Owl_neural_neuron.Make.Optimise.Algodiff.Builder.Sipo)

Module type Builder.Sipo

val label : string
val ff_f : A.elt -> t * t
val ff_arr : A.arr -> t * t
val df : t -> t -> t -> t
val dr : - t -> - t -> - (t Stdlib.ref * t Stdlib.ref) -> - (t Stdlib.ref * t Stdlib.ref) -> - t
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Builder/module-type-Siso/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Builder/module-type-Siso/index.html deleted file mode 100644 index f0397bc3b..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Builder/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl-base.Owl_neural_neuron.Make.Optimise.Algodiff.Builder.Siso)

Module type Builder.Siso

val label : string
val ff_f : A.elt -> t
val ff_arr : A.arr -> t
val df : t -> t -> t -> t
val dr : t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Builder/module-type-Sito/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Builder/module-type-Sito/index.html deleted file mode 100644 index 76a19e7a8..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Builder/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl-base.Owl_neural_neuron.Make.Optimise.Algodiff.Builder.Sito)

Module type Builder.Sito

val label : string
val ff_f : A.elt -> t * t * t
val ff_arr : A.arr -> t * t * t
val df : t -> t -> t -> t
val dr : - t -> - t -> - (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> - (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> - t
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Linalg/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Linalg/index.html deleted file mode 100644 index 98e66f7d0..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_neural_neuron.Make.Optimise.Algodiff.Linalg)

Module Algodiff.Linalg

val inv : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val logdet : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val chol : ?upper:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val qr : t -> t * t

Refer to :doc:`owl_dense_ndarray_generic`

val lq : t -> t * t

Refer to :doc:`owl_dense_ndarray_generic`

val svd : ?thin:bool -> t -> t * t * t

Refer to :doc:`owl_dense_ndarray_generic`

val sylvester : t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val lyapunov : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val discrete_lyapunov : - ?solver:[ `default | `bilinear | `direct ] -> - t -> - t -> - t

Refer to :doc:`owl_dense_ndarray_generic`

val (/@) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val care : ?diag_r:bool -> t -> t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dare : ?diag_r:bool -> t -> t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Mat/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Mat/index.html deleted file mode 100644 index f3fa83a7a..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_neural_neuron.Make.Optimise.Algodiff.Mat)

Module Algodiff.Mat

val empty : int -> int -> t
val zeros : int -> int -> t
val eye : int -> t
val ones : int -> int -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int -> int -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int -> int -> t
val shape : t -> int * int
val numel : t -> int
val row_num : t -> int
val col_num : t -> int
val reset : t -> unit
val reshape : int -> int -> t -> t
val get : t -> int -> int -> t
val set : t -> int -> int -> t -> t
val row : t -> int -> t
val mean : t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
val map_by_row : (t -> t) -> t -> t
val of_arrays : A.elt array array -> t
val init_2d : int -> int -> (int -> int -> t) -> t
val print : t -> unit
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Maths/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Maths/index.html deleted file mode 100644 index a63b43334..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/Maths/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Maths (owl-base.Owl_neural_neuron.Make.Optimise.Algodiff.Maths)

Module Algodiff.Maths

val (+) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (-) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (*) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (/) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (*@) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (**) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val add : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sub : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val mul : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val div : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val kron : t -> t -> t

Refer to :doc:`owl_dense_matrix_generic`

val dot : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val pow : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atan2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val min2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cross_entropy : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val inv : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val neg : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val abs : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val signum : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val floor : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val ceil : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val round : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sqr : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sqrt : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log2 : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log10 : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val exp : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sin : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cos : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tan : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sinh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cosh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tanh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val asin : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val acos : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atan : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val asinh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val acosh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atanh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp : ?axis:int -> ?keep_dims:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum : ?axis:int -> ?keep_dims:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum_reduce : ?axis:int array -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val mean : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose : ?axis:int array -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val swap : int -> int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l1norm' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm_sqr' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sigmoid : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val relu : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dawsn : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softplus : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softsign : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softmax : ?axis:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val reshape : t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val flatten : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_item : t -> int -> int -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_row : t -> int -> t

Refer to :doc:`owl_dense_ndarray_generic`

val concat : axis:int -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val split : axis:int -> int array -> t -> t array

Refer to :doc:`owl_dense_ndarray_generic`

val of_arrays : t array array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val to_arrays : t -> t array array

Refer to :doc:`owl_dense_ndarray_generic`

val concatenate : axis:int -> t array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val stack : axis:int -> t array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_slice : int list list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val set_slice : int list list -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_fancy : Owl_types.index list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val set_fancy : Owl_types.index list -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val diag : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val diagm : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val trace : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val triu : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tril : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/NN/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/NN/index.html deleted file mode 100644 index e102042b4..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/NN/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -NN (owl-base.Owl_neural_neuron.Make.Optimise.Algodiff.NN)

Module Algodiff.NN

val dropout : ?rate:float -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv1d : - ?padding:Owl_types.padding -> - t -> - t -> - int array -> - int array -> - t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv2d : - ?padding:Owl_types.padding -> - t -> - t -> - int array -> - int array -> - t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv3d : - ?padding:Owl_types.padding -> - t -> - t -> - int array -> - int array -> - t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool1d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool2d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool3d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool1d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool2d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool3d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val upsampling2d : t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val pad : ?v:A.elt -> int list list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/index.html deleted file mode 100644 index ba58e7712..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Algodiff/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -Algodiff (owl-base.Owl_neural_neuron.Make.Optimise.Algodiff)

Module Optimise.Algodiff

include Owl_algodiff_core_sig.Sig
Type definition
include Owl_algodiff_types_sig.Sig with type elt := A.elt and type arr := A.arr
type t =
  1. | F of A.elt
  2. | Arr of A.arr
  3. | DF of t * t * int
  4. | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
Core functions
val tag : unit -> int

TODO

val primal : t -> t

TODO

val primal' : t -> t

TODO

val zero : t -> t

TODO

val reset_zero : t -> t

TODO

val tangent : t -> t

TODO

val adjref : t -> t Stdlib.ref

TODO

val adjval : t -> t

TODO

val shape : t -> int array

TODO

val is_float : t -> bool

TODO

val is_arr : t -> bool

TODO

val row_num : t -> int

number of rows

val col_num : t -> int

number of columns

val numel : t -> int

number of elements

val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t

other functions, without tracking gradient

val clip_by_l2norm : A.elt -> t -> t

other functions, without tracking gradient

val copy_primal' : t -> t

TODO

val tile : t -> int array -> t

TODO

val repeat : t -> int array -> t

TODO

val pack_elt : A.elt -> t

convert from elt type to t type.

val unpack_elt : t -> A.elt

convert from t type to elt type.

val pack_flt : float -> t

convert from float type to t type.

val _f : float -> t

A shortcut function for F A.(float_to_elt x).

val unpack_flt : t -> float

convert from t type to float type.

val pack_arr : A.arr -> t

convert from arr type to t type.

val unpack_arr : t -> A.arr

convert from t type to arr type.

val deep_info : t -> string

TODO

val type_info : t -> string

TODO

val error_binop : string -> t -> t -> 'a

TODO

val error_uniop : string -> t -> 'a

TODO

val make_forward : t -> t -> int -> t

TODO

val make_reverse : t -> int -> t

TODO

val reverse_prop : t -> t -> unit

TODO

val diff : (t -> t) -> t -> t

diff f x returns the exact derivative of a function f : scalar -> scalar at point x. Simply calling diff f will return its derivative function g of the same type, i.e. g : scalar -> scalar.

Calling this function repeatedly gives you higher-order derivatives of f, i.e. f |> diff |> diff |> diff |> ...
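
For illustration, a minimal sketch of diff in use, assuming the concrete double-precision instantiation Owl.Algodiff.D from the owl package (the signatures above describe the functorised owl-base version):

  (* first and second derivatives of a scalar function; hypothetical example *)
  module AD = Owl.Algodiff.D

  let f x = AD.Maths.(sin x * x)        (* f : scalar -> scalar *)
  let f'  = AD.diff f                   (* derivative function of the same type *)
  let f'' = AD.(diff (diff f))          (* chain diff for higher-order derivatives *)

  let () =
    let x = AD.pack_flt 1.0 in
    Printf.printf "f'(1) = %g, f''(1) = %g\n"
      AD.(unpack_flt (f' x))
      AD.(unpack_flt (f'' x))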

val diff' : (t -> t) -> t -> t * t

similar to diff, but return (f x, diff f x).

val grad : (t -> t) -> t -> t

gradient of f : (vector -> scalar) at x, reverse ad.
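
As a quick sketch (again assuming Owl.Algodiff.D), grad applies to a function mapping an ndarray to a scalar:

  (* gradient of a vector -> scalar function, computed by reverse-mode AD *)
  module AD = Owl.Algodiff.D

  let f x = AD.Maths.(sum' (x * x))     (* sum of squares; its gradient is 2x *)

  let () =
    let x = AD.Arr.uniform [| 1; 5 |] in
    let g = AD.grad f x in              (* same shape as x *)
    AD.Mat.print g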

val grad' : (t -> t) -> t -> t * t

similar to grad, but return (f x, grad f x).

val jacobian : (t -> t) -> t -> t

jacobian of f : (vector -> vector) at x, both x and y are row vectors.

val jacobian' : (t -> t) -> t -> t * t

similar to jacobian, but return (f x, jacobian f x)

val jacobianv : (t -> t) -> t -> t -> t

jacobian vector product of f : (vector -> vector) at x along v, forward ad. Namely, it calculates (jacobian f x) v.

val jacobianv' : (t -> t) -> t -> t -> t * t

similar to jacobianv, but return (f x, jacobianv f x v)

val jacobianTv : (t -> t) -> t -> t -> t

transposed jacobian vector product of f : (vector -> vector) at x along v, backward ad. Namely, it calculates transpose (jacobian f x) *@ v.

val jacobianTv' : (t -> t) -> t -> t -> t * t

similar to jacobianTv, but return (f x, jacobianTv f x v)
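
A short sketch of the Jacobian functions above, assuming Owl.Algodiff.D and a row-vector convention for x, v and f x:

  (* full Jacobian, forward-mode J v and reverse-mode transpose(J) v products *)
  module AD = Owl.Algodiff.D

  let a = AD.Mat.gaussian 3 3
  let f x = AD.Maths.(tanh (x *@ a))    (* f : 1x3 row vector -> 1x3 row vector *)

  let () =
    let x = AD.Mat.gaussian 1 3 in
    let v = AD.Mat.gaussian 1 3 in
    let j   = AD.jacobian f x in        (* full 3x3 Jacobian *)
    let jv  = AD.jacobianv f x v in     (* forward ad, no full Jacobian built *)
    let jtv = AD.jacobianTv f x v in    (* reverse ad *)
    AD.Mat.print j; AD.Mat.print jv; AD.Mat.print jtv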

val hessian : (t -> t) -> t -> t

hessian of f : (scalar -> scalar) at x.

val hessian' : (t -> t) -> t -> t * t

similar to hessian, but return (f x, hessian f x)

val hessianv : (t -> t) -> t -> t -> t

hessian vector product of f : (scalar -> scalar) at x along v. Namely, it calculates (hessian f x) v.
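
A sketch of hessian and hessianv for a scalar-valued function of a row vector, assuming Owl.Algodiff.D:

  (* Hessian and Hessian-vector product of a scalar-valued objective *)
  module AD = Owl.Algodiff.D

  let f x = AD.Maths.(sum' (x * x * x)) (* sum of cubes; its Hessian is diagonal *)

  let () =
    let x = AD.Mat.uniform 1 4 in
    let v = AD.Mat.uniform 1 4 in
    let h  = AD.hessian f x in          (* 4x4 matrix of second derivatives *)
    let hv = AD.hessianv f x v in       (* Hessian-vector product, cheaper than forming h *)
    AD.Mat.print h; AD.Mat.print hv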

val hessianv' : (t -> t) -> t -> t -> t * t

similar to hessianv, but return (f x, hessianv f x v).

val laplacian : (t -> t) -> t -> t

laplacian of f : (scalar -> scalar) at x.

val laplacian' : (t -> t) -> t -> t * t

similar to laplacian, but return (f x, laplacian f x).

val gradhessian : (t -> t) -> t -> t * t

return (grad f x, hessian f x), f : (scalar -> scalar)

val gradhessian' : (t -> t) -> t -> t * t * t

return (f x, grad f x, hessian f x)

val gradhessianv : (t -> t) -> t -> t -> t * t

return (grad f x v, hessian f x v)

val gradhessianv' : (t -> t) -> t -> t -> t * t * t

return (f x, grad f x v, hessian f x v)

include Owl_algodiff_ops_sig.Sig - with type t := t - and type elt := A.elt - and type arr := A.arr - and type op := op
module Builder : - Owl_algodiff_ops_builder_sig.Sig - with type t := t - and type elt := A.elt - and type arr := A.arr - and type op := op
Supported Maths functions
module Maths : sig ... end
Supported Linalg functions
module Linalg : sig ... end
Supported Neural Network functions
module NN : sig ... end
Supported Mat functions
module Mat : sig ... end
Supported Arr functions
module Arr : sig ... end
Helper functions
include Owl_algodiff_graph_convert_sig.Sig with type t := t
val to_trace : t list -> string

to_trace [t0; t1; ...] returns the trace of the computation graph as a human-readable string, suitable for printing to the terminal.

val to_dot : t list -> string

to_dot [t0; t1; ...] returns the trace of the computation graph in the dot file format, which you can pass to other tools such as Graphviz for further visualisation.
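
For example, under the same Owl.Algodiff.D assumption, the graph of a small reverse-mode expression can be written out and rendered with Graphviz:

  (* dump the computation graph of y to a dot file; "graph.dot" is a made-up name *)
  module AD = Owl.Algodiff.D

  let () =
    let x = AD.make_reverse (AD.Mat.uniform 1 3) (AD.tag ()) in
    let y = AD.Maths.(sum' (sin x * x)) in
    let oc = open_out "graph.dot" in
    output_string oc (AD.to_dot [ y ]); (* render with: dot -Tpdf graph.dot *)
    close_out oc

to_trace [ y ] gives the same information as plain text for a quick look in the terminal.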

val pp_num : Stdlib.Format.formatter -> t -> unit

pp_num t pretty prints the abstract number used in Algodiff.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Batch/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Batch/index.html deleted file mode 100644 index 7fc2e10b6..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Batch/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Batch (owl-base.Owl_neural_neuron.Make.Optimise.Batch)

Module Optimise.Batch

Batch module

type typ =
  1. | Full
  2. | Mini of int
  3. | Sample of int
  4. | Stochastic

Types of batches.

val run : typ -> Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t

Execute the computations defined in module typ.

val batches : typ -> Algodiff.t -> int

Return the total number of batches given a batch typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Checkpoint/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Checkpoint/index.html deleted file mode 100644 index 05f241981..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Checkpoint/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Checkpoint (owl-base.Owl_neural_neuron.Make.Optimise.Checkpoint)

Module Optimise.Checkpoint

Checkpoint module

type state = {
  1. mutable current_batch : int;
  2. mutable batches_per_epoch : int;
  3. mutable epochs : float;
  4. mutable batches : int;
  5. mutable loss : Algodiff.t array;
  6. mutable start_at : float;
  7. mutable stop : bool;
  8. mutable gs : Algodiff.t array array;
  9. mutable ps : Algodiff.t array array;
  10. mutable us : Algodiff.t array array;
  11. mutable ch : Algodiff.t array array array;
}

Type definition of checkpoint

type typ =
  1. | Batch of int
  2. | Epoch of float
  3. | Custom of state -> unit
  4. | None

Types of checkpoints.

val init_state : int -> float -> state

init_state batches_per_epoch epochs initialises a state by specifying the number of batches per epoch and the number of epochs in total.

val default_checkpoint_fun : (string -> 'a) -> 'a

This function is used for saving intermediate files during optimisation.

val print_state_info : state -> unit

Print out the detail information of current state.

val print_summary : state -> unit

Print out the summary of current state.

val run : typ -> (string -> unit) -> int -> Algodiff.t -> state -> unit

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Clipping/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Clipping/index.html deleted file mode 100644 index c2991f81a..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Clipping/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Clipping (owl-base.Owl_neural_neuron.Make.Optimise.Clipping)

Module Optimise.Clipping

Clipping module

type typ =
  1. | L2norm of float
  2. | Value of float * float
  3. | None

Types of clipping functions.

val run : typ -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Gradient/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Gradient/index.html deleted file mode 100644 index ecc50a311..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Gradient/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -Gradient (owl-base.Owl_neural_neuron.Make.Optimise.Gradient)

Module Optimise.Gradient

Gradient module

type typ =
  1. | GD
  2. | CG
  3. | CD
  4. | NonlinearCG
  5. | DaiYuanCG
  6. | NewtonCG
  7. | Newton

Types of gradient function.

val run : - typ -> - (Algodiff.t -> Algodiff.t) -> - Algodiff.t -> - Algodiff.t -> - Algodiff.t -> - Algodiff.t -> - Algodiff.t

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Learning_Rate/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Learning_Rate/index.html deleted file mode 100644 index 161c38258..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Learning_Rate/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Learning_Rate (owl-base.Owl_neural_neuron.Make.Optimise.Learning_Rate)

Module Optimise.Learning_Rate

Strategies for learning rate update

type typ =
  1. | Adagrad of float
  2. | Const of float
  3. | Decay of float * float
  4. | Exp_decay of float * float
  5. | RMSprop of float * float
  6. | Adam of float * float * float
  7. | Schedule of float array

Representation of learning rate update strategies. Possible values include:

  • Adam (alpha, beta1, beta2), see ref for parameter meaning
val run : typ -> int -> Algodiff.t -> Algodiff.t array -> Algodiff.t

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val update_ch : typ -> Algodiff.t -> Algodiff.t array -> Algodiff.t array

Update the cache of gradients.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Loss/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Loss/index.html deleted file mode 100644 index 4b7c207ca..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Loss/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Loss (owl-base.Owl_neural_neuron.Make.Optimise.Loss)

Module Optimise.Loss

Loss module

type typ =
  1. | Hinge
  2. | L1norm
  3. | L2norm
  4. | Quadratic
  5. | Cross_entropy
  6. | Custom of Algodiff.t -> Algodiff.t -> Algodiff.t

Types of loss functions.

val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Momentum/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Momentum/index.html deleted file mode 100644 index 957253f7f..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Momentum/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Momentum (owl-base.Owl_neural_neuron.Make.Optimise.Momentum)

Module Optimise.Momentum

Momentum module

type typ =
  1. | Standard of float
  2. | Nesterov of float
  3. | None

Types of momentum functions.

val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Params/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Params/index.html deleted file mode 100644 index ad912f479..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Params/index.html +++ /dev/null @@ -1,14 +0,0 @@ - -Params (owl-base.Owl_neural_neuron.Make.Optimise.Params)

Module Optimise.Params

Params module

type typ = {
  1. mutable epochs : float;
  2. mutable batch : Batch.typ;
  3. mutable gradient : Gradient.typ;
  4. mutable loss : Loss.typ;
  5. mutable learning_rate : Learning_Rate.typ;
  6. mutable regularisation : Regularisation.typ;
  7. mutable momentum : Momentum.typ;
  8. mutable clipping : Clipping.typ;
  9. mutable stopping : Stopping.typ;
  10. mutable checkpoint : Checkpoint.typ;
  11. mutable verbosity : bool;
}

Type definition of parameter.

val default : unit -> typ

Create module typ with default values.

val config : - ?batch:Batch.typ -> - ?gradient:Gradient.typ -> - ?loss:Loss.typ -> - ?learning_rate:Learning_Rate.typ -> - ?regularisation:Regularisation.typ -> - ?momentum:Momentum.typ -> - ?clipping:Clipping.typ -> - ?stopping:Stopping.typ -> - ?checkpoint:Checkpoint.typ -> - ?verbosity:bool -> - float -> - typ

This function creates a parameter object with many configurations.
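
A sketch of building a parameter record with config, assuming a concrete instantiation of this Optimise parameter; the module path Owl.Optimise.D used below is an assumption about the owl package:

  (* every field not set explicitly keeps its default value *)
  module O = Owl.Optimise.D

  let params =
    O.Params.config
      ~batch:(O.Batch.Mini 128)
      ~loss:O.Loss.Cross_entropy
      ~learning_rate:(O.Learning_Rate.Adam (0.001, 0.9, 0.999))
      ~stopping:(O.Stopping.Const 1e-6)
      ~verbosity:true
      10.                               (* the positional float is the number of epochs *)

  let () = print_endline (O.Params.to_string params)

In Adam (alpha, beta1, beta2), alpha is the step size while beta1 and beta2 are the decay rates of the first and second moment estimates.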

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Regularisation/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Regularisation/index.html deleted file mode 100644 index 657d11ca6..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Regularisation/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Regularisation (owl-base.Owl_neural_neuron.Make.Optimise.Regularisation)

Module Optimise.Regularisation

Regularisation module

type typ =
  1. | L1norm of float
  2. | L2norm of float
  3. | Elastic_net of float * float
  4. | None

Types of regularisation functions.

val run : typ -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Stopping/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Stopping/index.html deleted file mode 100644 index 4d30f0e1d..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Stopping/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Stopping (owl-base.Owl_neural_neuron.Make.Optimise.Stopping)

Module Optimise.Stopping

Stopping module

type typ =
  1. | Const of float
  2. | Early of int * int
  3. | None

Types of stopping functions.

val run : typ -> float -> bool

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Utils/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Utils/index.html deleted file mode 100644 index da72b64af..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/Utils/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Utils (owl-base.Owl_neural_neuron.Make.Optimise.Utils)

Module Optimise.Utils

Utils module

val sample_num : Algodiff.t -> int

Return the total number of samples in the passed-in ndarray.

val draw_samples : Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t

draw_samples x y draws samples from both x (observations) and y (labels). The samples are drawn along axis 0, so x and y must agree along axis 0.

val get_chunk : - Algodiff.t -> - Algodiff.t -> - int -> - int -> - Algodiff.t * Algodiff.t

get_chunk x y i c gets a contiguous chunk of c samples starting at position i from x (observations) and y (labels).
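
For instance, with a concrete Optimise instantiation (the path Owl.Optimise.D is assumed):

  (* random mini-batch versus contiguous chunk of a toy dataset *)
  module O  = Owl.Optimise.D
  module AD = O.Algodiff

  let () =
    let x = AD.Mat.uniform 100 8 in                 (* 100 observations *)
    let y = AD.Mat.uniform 100 1 in                 (* 100 labels *)
    Printf.printf "samples: %d\n" (O.Utils.sample_num x);
    let xs, ys = O.Utils.draw_samples x y 16 in     (* 16 rows drawn at random *)
    let xc, yc = O.Utils.get_chunk x y 40 16 in     (* rows 40..55 *)
    ignore (xs, ys, xc, yc)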

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/index.html b/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/index.html deleted file mode 100644 index 363bc8125..000000000 --- a/owl-base/Owl_neural_neuron/Make/argument-1-Optimise/index.html +++ /dev/null @@ -1,31 +0,0 @@ - -Optimise (owl-base.Owl_neural_neuron.Make.Optimise)

Parameter Make.Optimise

module Utils : sig ... end

Utils module

module Learning_Rate : sig ... end

Strategies for learning rate update

module Batch : sig ... end

Batch module

module Loss : sig ... end

Loss module

module Gradient : sig ... end

Gradient module

module Momentum : sig ... end

Momentum module

module Regularisation : sig ... end

Regularisation module

module Clipping : sig ... end

Clipping module

module Stopping : sig ... end

Stopping module

module Checkpoint : sig ... end

Checkpoint module

module Params : sig ... end

Params module

Core functions
val minimise_weight : - ?state:Checkpoint.state -> - Params.typ -> - (Algodiff.t -> Algodiff.t -> Algodiff.t) -> - Algodiff.t -> - Algodiff.t -> - Algodiff.t -> - Checkpoint.state * Algodiff.t

This function minimises the weight w of the passed-in function f.

  • f is a function f : w -> x -> y.
  • w is a row vector but y can have any shape.

val minimise_network : - ?state:Checkpoint.state -> - Params.typ -> - (Algodiff.t -> Algodiff.t * Algodiff.t array array) -> - (Algodiff.t -> Algodiff.t array array * Algodiff.t array array) -> - (Algodiff.t array array -> unit) -> - (string -> unit) -> - Algodiff.t -> - Algodiff.t -> - Checkpoint.state

This function is specifically designed for minimising the weights in a neural network of graph structure. In Owl's earlier versions, the functions in the regression module were actually implemented using this function.

val minimise_fun : - ?state:Checkpoint.state -> - Params.typ -> - (Algodiff.t -> Algodiff.t) -> - Algodiff.t -> - Checkpoint.state * Algodiff.t

This function minimises f : x -> y w.r.t. x.

x is an ndarray and y is a scalar value.
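
A minimal sketch of minimise_fun on a toy objective, again assuming a concrete Optimise instantiation (Owl.Optimise.D is an assumed path):

  (* minimise || x - 3 ||^2, whose minimiser is the constant vector 3 *)
  module O  = Owl.Optimise.D
  module AD = O.Algodiff

  let f x = AD.Maths.(l2norm_sqr' (x - AD._f 3.))

  let () =
    let params =
      O.Params.config ~learning_rate:(O.Learning_Rate.Adam (0.01, 0.9, 0.999)) 100.
    in
    let _state, x_min = O.minimise_fun params f (AD.Mat.uniform 1 3) in
    AD.Mat.print x_min                  (* should end up close to [3, 3, 3] *)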

val minimise_compiled_network : - ?state:Checkpoint.state -> - Params.typ -> - (Algodiff.t -> Algodiff.t -> Algodiff.t) -> - (unit -> unit) -> - (string -> unit) -> - Algodiff.t -> - Algodiff.t -> - Checkpoint.state

TODO

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron/Make/index.html b/owl-base/Owl_neural_neuron/Make/index.html deleted file mode 100644 index 129846b59..000000000 --- a/owl-base/Owl_neural_neuron/Make/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Make (owl-base.Owl_neural_neuron.Make)

Module Owl_neural_neuron.Make

Parameters

Signature

module Optimise = Optimise
module Init : sig ... end
module Input : sig ... end
module Activation : sig ... end
module Linear : sig ... end
module LinearNoBias : sig ... end
module Recurrent : sig ... end
module LSTM : sig ... end
module GRU : sig ... end
module Conv1D : sig ... end
module DilatedConv1D : sig ... end
module TransposeConv1D : sig ... end
module Conv2D : sig ... end
module DilatedConv2D : sig ... end
module TransposeConv2D : sig ... end
module Conv3D : sig ... end
module DilatedConv3D : sig ... end
module TransposeConv3D : sig ... end
module FullyConnected : sig ... end
module MaxPool1D : sig ... end
module MaxPool2D : sig ... end
module AvgPool1D : sig ... end
module AvgPool2D : sig ... end
module GlobalMaxPool1D : sig ... end
module GlobalMaxPool2D : sig ... end
module GlobalAvgPool1D : sig ... end
module GlobalAvgPool2D : sig ... end
module UpSampling1D : sig ... end
module UpSampling2D : sig ... end
module UpSampling3D : sig ... end
module Padding1D : sig ... end
module Padding2D : sig ... end
module Padding3D : sig ... end
module Lambda : sig ... end
module LambdaArray : sig ... end
module Dropout : sig ... end
module Reshape : sig ... end
module Flatten : sig ... end
module Slice : sig ... end
module Add : sig ... end
module Mul : sig ... end
module Dot : sig ... end
module Max : sig ... end
module Average : sig ... end
module Concatenate : sig ... end
module Normalisation : sig ... end
module GaussianNoise : sig ... end
module GaussianDropout : sig ... end
module AlphaDropout : sig ... end
module Embedding : sig ... end
module Masking : sig ... end
type neuron =
  1. | Input of Input.neuron_typ
  2. | Linear of Linear.neuron_typ
  3. | LinearNoBias of LinearNoBias.neuron_typ
  4. | Embedding of Embedding.neuron_typ
  5. | LSTM of LSTM.neuron_typ
  6. | GRU of GRU.neuron_typ
  7. | Recurrent of Recurrent.neuron_typ
  8. | Conv1D of Conv1D.neuron_typ
  9. | Conv2D of Conv2D.neuron_typ
  10. | Conv3D of Conv3D.neuron_typ
  11. | DilatedConv1D of DilatedConv1D.neuron_typ
  12. | DilatedConv2D of DilatedConv2D.neuron_typ
  13. | DilatedConv3D of DilatedConv3D.neuron_typ
  14. | TransposeConv1D of TransposeConv1D.neuron_typ
  15. | TransposeConv2D of TransposeConv2D.neuron_typ
  16. | TransposeConv3D of TransposeConv3D.neuron_typ
  17. | FullyConnected of FullyConnected.neuron_typ
  18. | MaxPool1D of MaxPool1D.neuron_typ
  19. | MaxPool2D of MaxPool2D.neuron_typ
  20. | AvgPool1D of AvgPool1D.neuron_typ
  21. | AvgPool2D of AvgPool2D.neuron_typ
  22. | GlobalMaxPool1D of GlobalMaxPool1D.neuron_typ
  23. | GlobalMaxPool2D of GlobalMaxPool2D.neuron_typ
  24. | GlobalAvgPool1D of GlobalAvgPool1D.neuron_typ
  25. | GlobalAvgPool2D of GlobalAvgPool2D.neuron_typ
  26. | UpSampling2D of UpSampling2D.neuron_typ
  27. | Padding2D of Padding2D.neuron_typ
  28. | Dropout of Dropout.neuron_typ
  29. | Reshape of Reshape.neuron_typ
  30. | Flatten of Flatten.neuron_typ
  31. | Slice of Slice.neuron_typ
  32. | Lambda of Lambda.neuron_typ
  33. | LambdaArray of LambdaArray.neuron_typ
  34. | Activation of Activation.neuron_typ
  35. | GaussianNoise of GaussianNoise.neuron_typ
  36. | GaussianDropout of GaussianDropout.neuron_typ
  37. | AlphaDropout of AlphaDropout.neuron_typ
  38. | Normalisation of Normalisation.neuron_typ
  39. | Add of Add.neuron_typ
  40. | Mul of Mul.neuron_typ
  41. | Dot of Dot.neuron_typ
  42. | Max of Max.neuron_typ
  43. | Average of Average.neuron_typ
  44. | Concatenate of Concatenate.neuron_typ
val get_in_out_shape : neuron -> int array * int array
val get_in_shape : neuron -> int array
val get_out_shape : neuron -> int array
val connect : int array array -> neuron -> unit
val init : neuron -> unit
val reset : neuron -> unit
val mktag : int -> neuron -> unit
val mkpar : neuron -> Optimise.Algodiff.t array
val mkpri : neuron -> Optimise.Algodiff.t array
val mkadj : neuron -> Optimise.Algodiff.t array
val update : neuron -> Optimise.Algodiff.t array -> unit
val save_weights : neuron -> Optimise.Algodiff.t array
val load_weights : neuron -> Optimise.Algodiff.t array -> unit
val copy : neuron -> neuron
val to_string : neuron -> string
val to_name : neuron -> string
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/.dummy b/owl-base/Owl_neural_neuron_sig/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Activation/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Activation/index.html deleted file mode 100644 index 9b5739a55..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Activation/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Activation (owl-base.Owl_neural_neuron_sig.Sig.Activation)

Module Sig.Activation

type typ =
  1. | Elu
  2. | Relu
  3. | Sigmoid
  4. | HardSigmoid
  5. | Softmax of int
  6. | Softplus
  7. | Softsign
  8. | Tanh
  9. | Relu6
  10. | LeakyRelu of float
  11. | TRelu of float
  12. | Custom of Optimise.Algodiff.t -> Optimise.Algodiff.t
  13. | None
    (*

    Types of activation functions.

    *)
type neuron_typ = {
  1. mutable activation : typ;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val run_activation : Optimise.Algodiff.t -> typ -> Optimise.Algodiff.t

Run one specific activation function.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val activation_to_string : typ -> string

Return the name of a specific activation function.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Add/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Add/index.html deleted file mode 100644 index 47456641e..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Add/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Add (owl-base.Owl_neural_neuron_sig.Sig.Add)

Module Sig.Add

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/AlphaDropout/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/AlphaDropout/index.html deleted file mode 100644 index e8ee8abf1..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/AlphaDropout/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -AlphaDropout (owl-base.Owl_neural_neuron_sig.Sig.AlphaDropout)

Module Sig.AlphaDropout

type neuron_typ = {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : float -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Average/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Average/index.html deleted file mode 100644 index 66f53cda9..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Average/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Average (owl-base.Owl_neural_neuron_sig.Sig.Average)

Module Sig.Average

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/AvgPool1D/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/AvgPool1D/index.html deleted file mode 100644 index 1641bb319..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/AvgPool1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -AvgPool1D (owl-base.Owl_neural_neuron_sig.Sig.AvgPool1D)

Module Sig.AvgPool1D

type neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : Owl_types.padding -> int array -> int array -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.
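
A minimal sketch of the calling convention above, assuming N implements this Sig and that Owl_types.padding provides the usual SAME/VALID constructors; the kernel, stride and input shape below are illustrative, not taken from the page:

  let () =
    (* 1-d average pooling: window of 3, stride of 2, SAME padding *)
    let p = N.AvgPool1D.create Owl_types.SAME [| 3 |] [| 2 |] in
    (* connect to a hypothetical upstream output of 28 steps x 1 channel *)
    N.AvgPool1D.connect [| 28; 1 |] p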

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/AvgPool2D/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/AvgPool2D/index.html deleted file mode 100644 index 80c8e643f..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/AvgPool2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -AvgPool2D (owl-base.Owl_neural_neuron_sig.Sig.AvgPool2D)

Module Sig.AvgPool2D

type neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : Owl_types.padding -> int array -> int array -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Concatenate/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Concatenate/index.html deleted file mode 100644 index a03ed82bb..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Concatenate/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Concatenate (owl-base.Owl_neural_neuron_sig.Sig.Concatenate)

Module Sig.Concatenate

type neuron_typ = {
  1. mutable axis : int;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : int -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.
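
Concatenate joins several inputs along the given axis, so create takes the axis and connect takes one shape per incoming neuron. A hypothetical sketch, assuming N implements this Sig; the axis and shapes are illustrative (axis 0 is taken to be the batch axis, as documented for Normalisation further below):

  let () =
    let c = N.Concatenate.create 1 in
    (* two upstream feature vectors of width 8 and 16, joined into width 24 *)
    N.Concatenate.connect [| [| 8 |]; [| 16 |] |] c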

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Conv1D/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Conv1D/index.html deleted file mode 100644 index 09e8024a4..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Conv1D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -Conv1D (owl-base.Owl_neural_neuron_sig.Sig.Conv1D)

Module Sig.Conv1D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ


Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Conv2D/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Conv2D/index.html deleted file mode 100644 index 624d78e7e..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Conv2D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -Conv2D (owl-base.Owl_neural_neuron_sig.Sig.Conv2D)

Module Sig.Conv2D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.
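
A hypothetical sketch of the create/connect/init sequence above for a 2-d convolution, assuming N implements this Sig, that Owl_types.padding provides SAME/VALID, and that the kernel is laid out as [|width; height; in_channels; out_channels|]; all shapes are illustrative:

  let () =
    (* 3x3 kernel, 1 input channel, 32 output channels, stride 1x1, SAME padding *)
    let c = N.Conv2D.create Owl_types.SAME [| 3; 3; 1; 32 |] [| 1; 1 |] N.Init.GlorotUniform in
    N.Conv2D.connect [| 28; 28; 1 |] c;   (* upstream output: a 28x28, 1-channel image *)
    N.Conv2D.init c;                      (* allocate w and b according to init_typ *)
    Printf.printf "%s\n" (N.Conv2D.to_string c)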

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Conv3D/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Conv3D/index.html deleted file mode 100644 index 5c0a118de..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Conv3D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -Conv3D (owl-base.Owl_neural_neuron_sig.Sig.Conv3D)

Module Sig.Conv3D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/DilatedConv1D/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/DilatedConv1D/index.html deleted file mode 100644 index c678d4f9b..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/DilatedConv1D/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -DilatedConv1D (owl-base.Owl_neural_neuron_sig.Sig.DilatedConv1D)

Module Sig.DilatedConv1D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/DilatedConv2D/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/DilatedConv2D/index.html deleted file mode 100644 index c12211081..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/DilatedConv2D/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -DilatedConv2D (owl-base.Owl_neural_neuron_sig.Sig.DilatedConv2D)

Module Sig.DilatedConv2D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/DilatedConv3D/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/DilatedConv3D/index.html deleted file mode 100644 index d6550c3f7..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/DilatedConv3D/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -DilatedConv3D (owl-base.Owl_neural_neuron_sig.Sig.DilatedConv3D)

Module Sig.DilatedConv3D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Dot/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Dot/index.html deleted file mode 100644 index be602cb45..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Dot/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Dot (owl-base.Owl_neural_neuron_sig.Sig.Dot)

Module Sig.Dot

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Dropout/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Dropout/index.html deleted file mode 100644 index 0cf15e567..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Dropout/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Dropout (owl-base.Owl_neural_neuron_sig.Sig.Dropout)

Module Sig.Dropout

type neuron_typ = {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : float -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.
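
AlphaDropout, GaussianDropout and GaussianNoise above follow the same pattern: a single float (rate or sigma) configures the neuron and there are no trainable parameters. A minimal hypothetical sketch, assuming N implements this Sig; the rate and shape are illustrative:

  let () =
    let d = N.Dropout.create 0.5 in      (* drop half of the activations at random *)
    N.Dropout.connect [| 128 |] d;
    print_endline (N.Dropout.to_string d)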

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Embedding/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Embedding/index.html deleted file mode 100644 index 0f916d282..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Embedding/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Embedding (owl-base.Owl_neural_neuron_sig.Sig.Embedding)

Module Sig.Embedding

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable init_typ : Init.typ;
  3. mutable in_dim : int;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int -> int -> int -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.
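
A hypothetical sketch, assuming N implements this Sig. The interpretation of the two int arguments as vocabulary size (in_dim) and embedding width, and of ?inputs as the sequence length, is an assumption made for illustration and is not stated on the page above:

  let () =
    (* hypothetically: a 10000-word vocabulary embedded into 50 dimensions,
       fed sequences of 20 token indices *)
    let e = N.Embedding.create ~inputs:20 10000 50 N.Init.Standard in
    N.Embedding.init e;
    ignore (N.Embedding.mkpar e)   (* the embedding matrix w *)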

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Flatten/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Flatten/index.html deleted file mode 100644 index 6706428f2..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Flatten/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Flatten (owl-base.Owl_neural_neuron_sig.Sig.Flatten)

Module Sig.Flatten

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/FullyConnected/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/FullyConnected/index.html deleted file mode 100644 index 4539afeaf..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/FullyConnected/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -FullyConnected (owl-base.Owl_neural_neuron_sig.Sig.FullyConnected)

Module Sig.FullyConnected

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable init_typ : Init.typ;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int -> int -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.
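
A minimal hypothetical sketch of the lifecycle documented above (create, connect, init, then exchange the parameters with the optimiser), assuming N implements this Sig; the layer sizes are illustrative:

  let () =
    (* a 784 -> 10 dense layer initialised with the Standard scheme *)
    let fc = N.FullyConnected.create ~inputs:784 10 N.Init.Standard in
    N.FullyConnected.connect [| 784 |] fc;
    N.FullyConnected.init fc;
    let ps = N.FullyConnected.mkpar fc in   (* [| w; b |] *)
    N.FullyConnected.update fc ps           (* write (possibly modified) parameters back *)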

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/GRU/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/GRU/index.html deleted file mode 100644 index 86557d851..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/GRU/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GRU (owl-base.Owl_neural_neuron_sig.Sig.GRU)

Module Sig.GRU

type neuron_typ = {
  1. mutable wxz : Optimise.Algodiff.t;
  2. mutable whz : Optimise.Algodiff.t;
  3. mutable wxr : Optimise.Algodiff.t;
  4. mutable whr : Optimise.Algodiff.t;
  5. mutable wxh : Optimise.Algodiff.t;
  6. mutable whh : Optimise.Algodiff.t;
  7. mutable bz : Optimise.Algodiff.t;
  8. mutable br : Optimise.Algodiff.t;
  9. mutable bh : Optimise.Algodiff.t;
  10. mutable h : Optimise.Algodiff.t;
  11. mutable init_typ : Init.typ;
  12. mutable in_shape : int array;
  13. mutable out_shape : int array;
}

Neuron type definition.

val create : ?time_steps:int -> ?inputs:int -> int -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/GaussianDropout/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/GaussianDropout/index.html deleted file mode 100644 index c436e41a0..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/GaussianDropout/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GaussianDropout (owl-base.Owl_neural_neuron_sig.Sig.GaussianDropout)

Module Sig.GaussianDropout

type neuron_typ = {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : float -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/GaussianNoise/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/GaussianNoise/index.html deleted file mode 100644 index 8875c19c8..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/GaussianNoise/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GaussianNoise (owl-base.Owl_neural_neuron_sig.Sig.GaussianNoise)

Module Sig.GaussianNoise

type neuron_typ = {
  1. mutable sigma : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : float -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/GlobalAvgPool1D/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/GlobalAvgPool1D/index.html deleted file mode 100644 index 74dec4923..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/GlobalAvgPool1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GlobalAvgPool1D (owl-base.Owl_neural_neuron_sig.Sig.GlobalAvgPool1D)

Module Sig.GlobalAvgPool1D

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/GlobalAvgPool2D/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/GlobalAvgPool2D/index.html deleted file mode 100644 index 31d04d1d3..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/GlobalAvgPool2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GlobalAvgPool2D (owl-base.Owl_neural_neuron_sig.Sig.GlobalAvgPool2D)

Module Sig.GlobalAvgPool2D

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/GlobalMaxPool1D/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/GlobalMaxPool1D/index.html deleted file mode 100644 index 573779ed7..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/GlobalMaxPool1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GlobalMaxPool1D (owl-base.Owl_neural_neuron_sig.Sig.GlobalMaxPool1D)

Module Sig.GlobalMaxPool1D

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/GlobalMaxPool2D/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/GlobalMaxPool2D/index.html deleted file mode 100644 index 71f0fb0c2..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/GlobalMaxPool2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -GlobalMaxPool2D (owl-base.Owl_neural_neuron_sig.Sig.GlobalMaxPool2D)

Module Sig.GlobalMaxPool2D

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Init/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Init/index.html deleted file mode 100644 index 978071316..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Init/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Init (owl-base.Owl_neural_neuron_sig.Sig.Init)

Module Sig.Init

type typ =
  1. | Uniform of float * float
  2. | Gaussian of float * float
  3. | Standard
  4. | Tanh
  5. | GlorotNormal
  6. | GlorotUniform
  7. | LecunNormal
  8. | HeNormal
  9. | Custom of (int array -> Optimise.Algodiff.t)

Initialisation types.
val calc_fans : int array -> float * float

Calculate fan-in and fan-out of weights.

val run : typ -> int array -> Optimise.Algodiff.t -> Optimise.Algodiff.t

Execute the computation in this neuron.

val to_string : typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.
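
A hypothetical sketch of using these initialisers on their own, assuming N implements this Sig; the weight shape and the Uniform bounds are illustrative only:

  let () =
    (* fan-in / fan-out of a hypothetical 3x3 conv kernel going from 1 to 32 channels *)
    let fan_in, fan_out = N.Init.calc_fans [| 3; 3; 1; 32 |] in
    Printf.printf "fan_in = %g, fan_out = %g\n" fan_in fan_out;
    (* an initialisation scheme is simply a value of Init.typ *)
    let _scheme : N.Init.typ = N.Init.Uniform (-0.1, 0.1) in
    ()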

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Input/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Input/index.html deleted file mode 100644 index ce70159c5..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Input/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Input (owl-base.Owl_neural_neuron_sig.Sig.Input)

Module Sig.Input

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : int array -> neuron_typ

Create the neuron.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/LSTM/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/LSTM/index.html deleted file mode 100644 index 15de7cb5a..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/LSTM/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -LSTM (owl-base.Owl_neural_neuron_sig.Sig.LSTM)

Module Sig.LSTM

type neuron_typ = {
  1. mutable wxi : Optimise.Algodiff.t;
  2. mutable whi : Optimise.Algodiff.t;
  3. mutable wxc : Optimise.Algodiff.t;
  4. mutable whc : Optimise.Algodiff.t;
  5. mutable wxf : Optimise.Algodiff.t;
  6. mutable whf : Optimise.Algodiff.t;
  7. mutable wxo : Optimise.Algodiff.t;
  8. mutable who : Optimise.Algodiff.t;
  9. mutable bi : Optimise.Algodiff.t;
  10. mutable bc : Optimise.Algodiff.t;
  11. mutable bf : Optimise.Algodiff.t;
  12. mutable bo : Optimise.Algodiff.t;
  13. mutable c : Optimise.Algodiff.t;
  14. mutable h : Optimise.Algodiff.t;
  15. mutable init_typ : Init.typ;
  16. mutable in_shape : int array;
  17. mutable out_shape : int array;
}

Neuron type definition.

val create : ?time_steps:int -> ?inputs:int -> int -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.
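
A hypothetical sketch, assuming N implements this Sig; the record above lists the weight matrices and biases that mkpar collects. The sequence length, input width and hidden size below are illustrative:

  let () =
    (* 128 hidden units over sequences of 10 steps of 27-dimensional input *)
    let l = N.LSTM.create ~time_steps:10 ~inputs:27 128 N.Init.Tanh in
    N.LSTM.init l;
    Printf.printf "lstm has %d parameter tensors\n"
      (Array.length (N.LSTM.mkpar l))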

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Lambda/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Lambda/index.html deleted file mode 100644 index 4e0037ffc..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Lambda/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Lambda (owl-base.Owl_neural_neuron_sig.Sig.Lambda)

Module Sig.Lambda

type neuron_typ = {
  1. mutable lambda : Optimise.Algodiff.t -> Optimise.Algodiff.t;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : ?out_shape:int array -> (Optimise.Algodiff.t -> Optimise.Algodiff.t) -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.
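
Lambda wraps an arbitrary Algodiff.t -> Algodiff.t function as a stateless neuron; ?out_shape only needs to be supplied when the function changes the shape. A minimal hypothetical sketch, assuming N implements this Sig (the identity function keeps it self-contained):

  let () =
    let lam = N.Lambda.create (fun x -> x) in   (* shape-preserving, so no ?out_shape *)
    N.Lambda.connect [| 32 |] lam;
    print_endline (N.Lambda.to_string lam)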

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/LambdaArray/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/LambdaArray/index.html deleted file mode 100644 index 083ba4500..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/LambdaArray/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -LambdaArray (owl-base.Owl_neural_neuron_sig.Sig.LambdaArray)

Module Sig.LambdaArray

type neuron_typ = {
  1. mutable lambda : Optimise.Algodiff.t array -> Optimise.Algodiff.t;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : int array -> (Optimise.Algodiff.t array -> Optimise.Algodiff.t) -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Linear/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Linear/index.html deleted file mode 100644 index e70bfc557..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Linear/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Linear (owl-base.Owl_neural_neuron_sig.Sig.Linear)

Module Sig.Linear

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable init_typ : Init.typ;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int -> int -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/LinearNoBias/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/LinearNoBias/index.html deleted file mode 100644 index 72e5b7efc..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/LinearNoBias/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -LinearNoBias (owl-base.Owl_neural_neuron_sig.Sig.LinearNoBias)

Module Sig.LinearNoBias

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable init_typ : Init.typ;
  3. mutable in_shape : int array;
  4. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int -> int -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Masking/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Masking/index.html deleted file mode 100644 index 3febda193..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Masking/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Masking (owl-base.Owl_neural_neuron_sig.Sig.Masking)

Module Sig.Masking

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Max/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Max/index.html deleted file mode 100644 index 1f4521583..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Max/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Max (owl-base.Owl_neural_neuron_sig.Sig.Max)

Module Sig.Max

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/MaxPool1D/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/MaxPool1D/index.html deleted file mode 100644 index f92c0c3c3..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/MaxPool1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -MaxPool1D (owl-base.Owl_neural_neuron_sig.Sig.MaxPool1D)

Module Sig.MaxPool1D

type neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : Owl_types.padding -> int array -> int array -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/MaxPool2D/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/MaxPool2D/index.html deleted file mode 100644 index ff6c40a19..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/MaxPool2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -MaxPool2D (owl-base.Owl_neural_neuron_sig.Sig.MaxPool2D)

Module Sig.MaxPool2D

type neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}

Neuron type definition.

val create : Owl_types.padding -> int array -> int array -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Mul/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Mul/index.html deleted file mode 100644 index cb2ce68d4..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Mul/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mul (owl-base.Owl_neural_neuron_sig.Sig.Mul)

Module Sig.Mul

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : unit -> neuron_typ

Create the neuron.

val connect : int array array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : 'a -> neuron_typ

Make a deep copy of the neuron and its parameters.

val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Normalisation/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Normalisation/index.html deleted file mode 100644 index 0743d3cb3..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Normalisation/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -Normalisation (owl-base.Owl_neural_neuron_sig.Sig.Normalisation)

Module Sig.Normalisation

type neuron_typ = {
  1. mutable axis : int;
  2. mutable beta : Optimise.Algodiff.t;
  3. mutable gamma : Optimise.Algodiff.t;
  4. mutable mu : Optimise.Algodiff.t;
  5. mutable var : Optimise.Algodiff.t;
  6. mutable decay : Optimise.Algodiff.t;
  7. mutable training : bool;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}

Neuron type definition.

val create : ?training:bool -> ?decay:float -> ?mu:Optimise.Algodiff.A.arr -> ?var:Optimise.Algodiff.A.arr -> int -> neuron_typ

Create the neuron. Note that axis 0 is the batch axis.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the trainable parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update trainable parameters of the neuron, used by Optimise module.

val load_weights : neuron_typ -> Optimise.Algodiff.t array -> unit

Load both trainable and non-trainable parameters into the neuron.

val save_weights : neuron_typ -> Optimise.Algodiff.t array

Assemble both trainable and non-trainable parameters of the neuron.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.
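
Normalisation is the one neuron above that separates trainable parameters (beta and gamma, handled by mkpar/update) from non-trainable state (the running mu and var, included only by save_weights/load_weights). A hypothetical sketch, assuming N implements this Sig; the axis, decay and shape are illustrative:

  let () =
    (* normalisation over the last axis with a running-average decay of 0.9 *)
    let bn = N.Normalisation.create ~training:true ~decay:0.9 (-1) in
    N.Normalisation.connect [| 64 |] bn;
    N.Normalisation.init bn;
    let trainable = N.Normalisation.mkpar bn in          (* beta, gamma *)
    let everything = N.Normalisation.save_weights bn in  (* beta, gamma, mu, var *)
    ignore (trainable, everything)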

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/A/Linalg/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/A/Linalg/index.html deleted file mode 100644 index d8f597310..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Algodiff.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
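
A hypothetical sketch of calling these routines directly on the underlying ndarray type, assuming N implements this Sig; uniform and the 4x4 shape come from the A module listed further below and are used purely for illustration:

  let () =
    let module A = N.Optimise.Algodiff.A in
    let x = A.uniform [| 4; 4 |] in
    let q, r = A.Linalg.qr x in                  (* x = q * r, with q orthonormal *)
    let _u, _s, _vt = A.Linalg.svd ~thin:true x in
    ignore (q, r)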
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/A/Mat/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/A/Mat/index.html deleted file mode 100644 index c617567e5..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Algodiff.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/A/Scalar/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/A/Scalar/index.html deleted file mode 100644 index 63738c02e..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Algodiff.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/A/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/A/index.html deleted file mode 100644 index 29ac076e8..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Algodiff.A)

Module Algodiff.A

include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Arr/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Arr/index.html deleted file mode 100644 index 79d261266..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Arr/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Arr (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Algodiff.Arr)

Module Algodiff.Arr

val empty : int array -> t
val zeros : int array -> t
val ones : int array -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int array -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int array -> t
val shape : t -> int array
val numel : t -> int
val reset : t -> unit
val reshape : t -> int array -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Builder/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Builder/index.html deleted file mode 100644 index aa40cc896..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Builder/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Builder (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Algodiff.Builder)

Module Algodiff.Builder

Ops Builder
module type Siso = sig ... end
val build_siso : (module Siso) -> t -> t

build single input single output operations

module type Sipo = sig ... end
val build_sipo : (module Sipo) -> t -> t * t

build single input pair outputs operations

module type Sito = sig ... end
val build_sito : (module Sito) -> t -> t * t * t

build single input triple outputs operations

module type Siao = sig ... end
val build_siao : (module Siao) -> t -> t array

build single input array output operations

module type Piso = sig ... end
val build_piso : (module Piso) -> t -> t -> t

build pair inputs single output operations

module type Aiso = sig ... end
val build_aiso : (module Aiso) -> t array -> t

build array input single output operations

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Aiso/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Aiso/index.html deleted file mode 100644 index 7ac0492dd..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Algodiff.Builder.Aiso)

Module type Builder.Aiso

val label : string
val ff : t array -> t
val df : int list -> t -> t array -> t array -> t
val dr : int list -> t array -> t -> t Stdlib.ref -> t list
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Piso/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Piso/index.html deleted file mode 100644 index 82c6622b6..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Algodiff.Builder.Piso)

Module type Builder.Piso

val label : string
val ff_aa : A.elt -> A.elt -> t
val ff_ab : A.elt -> A.arr -> t
val ff_ba : A.arr -> A.elt -> t
val ff_bb : A.arr -> A.arr -> t
val df_da : t -> t -> t -> t -> t
val df_db : t -> t -> t -> t -> t
val df_dab : t -> t -> t -> t -> t -> t
val dr_ab : t -> t -> t -> t Stdlib.ref -> t * t
val dr_a : t -> t -> t -> t Stdlib.ref -> t
val dr_b : t -> t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Siao/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Siao/index.html deleted file mode 100644 index dcd2dd022..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Siao/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siao (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Algodiff.Builder.Siao)

Module type Builder.Siao

val label : string
val ff_f : A.elt -> t array
val ff_arr : A.arr -> t array
val df : t array -> t -> t -> t array
val dr : t -> t -> t Stdlib.ref array -> t Stdlib.ref array -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Sipo/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Sipo/index.html deleted file mode 100644 index bcc371dcf..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Algodiff.Builder.Sipo)

Module type Builder.Sipo

val label : string
val ff_f : A.elt -> t * t
val ff_arr : A.arr -> t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Siso/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Siso/index.html deleted file mode 100644 index e72e85882..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Algodiff.Builder.Siso)

Module type Builder.Siso

val label : string
val ff_f : A.elt -> t
val ff_arr : A.arr -> t
val df : t -> t -> t -> t
val dr : t -> t -> t Stdlib.ref -> t
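
For illustration, here is a hedged sketch of how a module of this Siso shape is fed to Builder.build_siso to define a custom differentiable unary operator. It mirrors how Owl defines its own unary maths functions; the concrete instance Owl.Algodiff.D and its A submodule are assumptions, not part of this signature.

  module AD = Owl.Algodiff.D   (* assumed concrete instance of this signature *)
  open AD

  (* x^2 as a custom operator: forward rules plus tangent/adjoint rules *)
  let square =
    Builder.build_siso
      (module struct
        let label = "square"
        let ff_f a = F A.Scalar.(mul a a)                  (* scalar forward pass *)
        let ff_arr a = Arr A.(mul a a)                     (* ndarray forward pass *)
        let df _cp ap at = Maths.(pack_flt 2. * ap * at)   (* tangent: 2 x dx *)
        let dr a _cp ca = Maths.(pack_flt 2. * a * !ca)    (* adjoint: 2 x * incoming adjoint *)
      end : Builder.Siso)

  (* square now behaves like a built-in op, e.g. diff square (F 3.) evaluates to F 6. *)
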
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Sito/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Sito/index.html deleted file mode 100644 index 025be0a41..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Algodiff.Builder.Sito)

Module type Builder.Sito

val label : string
val ff_f : A.elt -> t * t * t
val ff_arr : A.arr -> t * t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Linalg/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Linalg/index.html deleted file mode 100644 index 4d49d0e87..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Algodiff.Linalg)

Module Algodiff.Linalg

val inv : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val logdet : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val chol : ?upper:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val qr : t -> t * t

Refer to :doc:`owl_dense_ndarray_generic`

val lq : t -> t * t

Refer to :doc:`owl_dense_ndarray_generic`

val svd : ?thin:bool -> t -> t * t * t

Refer to :doc:`owl_dense_ndarray_generic`

val sylvester : t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val lyapunov : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (/@) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val care : ?diag_r:bool -> t -> t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dare : ?diag_r:bool -> t -> t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Mat/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Mat/index.html deleted file mode 100644 index ac734843f..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Algodiff.Mat)

Module Algodiff.Mat

val empty : int -> int -> t
val zeros : int -> int -> t
val eye : int -> t
val ones : int -> int -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int -> int -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int -> int -> t
val shape : t -> int * int
val numel : t -> int
val row_num : t -> int
val col_num : t -> int
val reset : t -> unit
val reshape : int -> int -> t -> t
val get : t -> int -> int -> t
val set : t -> int -> int -> t -> t
val row : t -> int -> t
val mean : t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
val map_by_row : (t -> t) -> t -> t
val of_arrays : A.elt array array -> t
val init_2d : int -> int -> (int -> int -> t) -> t
val print : t -> unit
\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Maths/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Maths/index.html deleted file mode 100644 index 126ae00a1..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/Maths/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Maths (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Algodiff.Maths)

Module Algodiff.Maths

val (+) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (-) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (*) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (/) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (*@) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (**) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val add : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sub : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val mul : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val div : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val kron : t -> t -> t

Refer to :doc:`owl_dense_matrix_generic`

val dot : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val pow : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atan2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val min2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cross_entropy : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val inv : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val neg : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val abs : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val signum : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val floor : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val ceil : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val round : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sqr : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sqrt : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log2 : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log10 : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val exp : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sin : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cos : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tan : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sinh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cosh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tanh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val asin : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val acos : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atan : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val asinh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val acosh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atanh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp : ?axis:int -> ?keep_dims:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum : ?axis:int -> ?keep_dims:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum_reduce : ?axis:int array -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val mean : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose : ?axis:int array -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val swap : int -> int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l1norm' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm_sqr' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sigmoid : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val relu : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dawsn : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softplus : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softsign : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softmax : ?axis:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val reshape : t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val flatten : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_item : t -> int -> int -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_row : t -> int -> t

Refer to :doc:`owl_dense_ndarray_generic`

val concat : axis:int -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val split : axis:int -> int array -> t -> t array

Refer to :doc:`owl_dense_ndarray_generic`

val of_arrays : t array array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val to_arrays : t -> t array array

Refer to :doc:`owl_dense_ndarray_generic`

val concatenate : axis:int -> t array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val stack : axis:int -> t array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_slice : int list list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val set_slice : int list list -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_fancy : Owl_types.index list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val set_fancy : Owl_types.index list -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val diag : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val diagm : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val trace : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val triu : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tril : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/NN/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/NN/index.html deleted file mode 100644 index 69807bf5b..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/NN/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -NN (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Algodiff.NN)

Module Algodiff.NN

val dropout : ?rate:float -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool1d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool2d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool3d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool1d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool2d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool3d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val upsampling2d : t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val pad : ?v:A.elt -> int list list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/index.html deleted file mode 100644 index 77598fbd8..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Algodiff/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -Algodiff (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Algodiff)

Module Optimise.Algodiff

include Owl_algodiff_core_sig.Sig
Type definition
include Owl_algodiff_types_sig.Sig with type elt := A.elt and type arr := A.arr
type t =
  1. | F of A.elt
  2. | Arr of A.arr
  3. | DF of t * t * int
  4. | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
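
The four constructors capture, respectively, constant scalars, constant ndarrays, forward-mode dual numbers and reverse-mode graph nodes. A minimal sketch of telling them apart, assuming Owl.Algodiff.D as the concrete instance:

  module AD = Owl.Algodiff.D

  let describe (x : AD.t) =
    match x with
    | AD.F _ -> "constant scalar"
    | AD.Arr _ -> "constant ndarray"
    | AD.DF (_, _, _) -> "forward-mode value: primal, tangent, tag"
    | AD.DR (_, _, _, _, _, _) -> "reverse-mode node: primal, adjoint ref, op, ..."
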
Core functions
val tag : unit -> int

TODO

val primal : t -> t

TODO

val primal' : t -> t

TODO

val zero : t -> t

TODO

val reset_zero : t -> t

TODO

val tangent : t -> t

TODO

val adjref : t -> t Stdlib.ref

TODO

val adjval : t -> t

TODO

val shape : t -> int array

TODO

val is_float : t -> bool

TODO

val is_arr : t -> bool

TODO

val row_num : t -> int

number of rows

val col_num : t -> int

number of columns

val numel : t -> int

number of elements

val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t

other functions, without tracking gradient

val clip_by_l2norm : A.elt -> t -> t

other functions, without tracking gradient

val copy_primal' : t -> t

TODO

val tile : t -> int array -> t

TODO

val repeat : t -> int array -> t

TODO

val pack_elt : A.elt -> t

convert from elt type to t type.

val unpack_elt : t -> A.elt

convert from t type to elt type.

val pack_flt : float -> t

convert from float type to t type.

val _f : float -> t

A shortcut function for F A.(float_to_elt x).

val unpack_flt : t -> float

convert from t type to float type.

val pack_arr : A.arr -> t

convert from arr type to t type.

val unpack_arr : t -> A.arr

convert from t type to arr type.
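
A minimal sketch of the packing round trip, assuming Owl.Algodiff.D as the concrete instance (so A.arr is a double-precision ndarray):

  module AD = Owl.Algodiff.D

  let x  = Owl.Arr.uniform [| 3; 3 |]   (* plain A.arr ndarray *)
  let t  = AD.pack_arr x                (* A.arr -> t *)
  let x' = AD.unpack_arr t              (* t -> A.arr *)
  let s  = AD.pack_flt 1.5              (* float -> t, same as AD._f 1.5 *)
  let f  = AD.unpack_flt s              (* t -> float *)
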

val deep_info : t -> string

TODO

val type_info : t -> string

TODO

val error_binop : string -> t -> t -> 'a

TODO

val error_uniop : string -> t -> 'a

TODO

val make_forward : t -> t -> int -> t

TODO

val make_reverse : t -> int -> t

TODO

val reverse_prop : t -> t -> unit

TODO

val diff : (t -> t) -> t -> t

diff f x returns the exact derivative of a function f : scalar -> scalar at point x. Simply calling diff f will return its derivative function g of the same type, i.e. g : scalar -> scalar.

Calling this function repeatedly gives you higher-order derivatives of f, i.e. f |> diff |> diff |> diff |> ...
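
A short sketch, assuming Owl.Algodiff.D as the concrete instance, of a first and a second derivative obtained by applying diff twice:

  module AD = Owl.Algodiff.D

  let f x = AD.Maths.(sin x * x)

  let d1 = AD.(diff f (F 2.) |> unpack_flt)          (* f'(2)  *)
  let d2 = AD.(diff (diff f) (F 2.) |> unpack_flt)   (* f''(2) *)
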

val diff' : (t -> t) -> t -> t * t

similar to diff, but return (f x, diff f x).

val grad : (t -> t) -> t -> t

gradient of f : (vector -> scalar) at x, reverse ad.

val grad' : (t -> t) -> t -> t * t

similar to grad, but return (f x, grad f x).
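
For example, a sketch (with Owl.Algodiff.D assumed as the concrete instance) of the gradient of a vector -> scalar function computed by reverse-mode AD:

  module AD = Owl.Algodiff.D

  (* f maps a row vector to a scalar: f x = sum_i (x_i - 1)^2 *)
  let f x = AD.Maths.(l2norm_sqr' (x - AD.pack_flt 1.))

  let x = AD.pack_arr (Owl.Arr.uniform [| 1; 5 |])
  let g = AD.grad f x |> AD.unpack_arr   (* gradient 2 (x - 1), same shape as x *)
  let fx, g' = AD.grad' f x              (* (f x, grad f x) in one pass *)
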

val jacobian : (t -> t) -> t -> t

jacobian of f : (vector -> vector) at x, both x and y are row vectors.

val jacobian' : (t -> t) -> t -> t * t

similar to jacobian, but return (f x, jacobian f x)

val jacobianv : (t -> t) -> t -> t -> t

jacobian vector product of f : (vector -> vector) at x along v, forward ad. Namely, it calculates (jacobian f x) v.

val jacobianv' : (t -> t) -> t -> t -> t * t

similar to jacobianv, but return (f x, jacobianv f x v)

val jacobianTv : (t -> t) -> t -> t -> t

transposed jacobian vector product of f : (vector -> vector) at x along v, backward ad. Namely, it calculates (transpose (jacobian f x)) v.

val jacobianTv' : (t -> t) -> t -> t -> t * t

similar to jacobianTv, but return (f x, jacobianTv f x v)

val hessian : (t -> t) -> t -> t

hessian of f : (scalar -> scalar) at x.

val hessian' : (t -> t) -> t -> t * t

similar to hessian, but return (f x, hessian f x)

val hessianv : (t -> t) -> t -> t -> t

hessian vector product of f : (scalar -> scalar) at x along v. Namely, it calculates (hessian f x) v.

val hessianv' : (t -> t) -> t -> t -> t * t

similar to hessianv, but return (f x, hessianv f x v).

val laplacian : (t -> t) -> t -> t

laplacian of f : (scalar -> scalar) at x.

val laplacian' : (t -> t) -> t -> t * t

similar to laplacian, but return (f x, laplacian f x).

val gradhessian : (t -> t) -> t -> t * t

return (grad f x, hessian f x), f : (scalar -> scalar)

val gradhessian' : (t -> t) -> t -> t * t * t

return (f x, grad f x, hessian f x)

val gradhessianv : (t -> t) -> t -> t -> t * t

return (grad f x v, hessian f x v)

val gradhessianv' : (t -> t) -> t -> t -> t * t * t

return (f x, grad f x v, hessian f x v)

include Owl_algodiff_ops_sig.Sig with type t := t and type elt := A.elt and type arr := A.arr and type op := op
module Builder : Owl_algodiff_ops_builder_sig.Sig with type t := t and type elt := A.elt and type arr := A.arr and type op := op
Supported Maths functions
module Maths : sig ... end
Supported Linalg functions
module Linalg : sig ... end
Supported Neural Network functions
module NN : sig ... end
Supported Mat functions
module Mat : sig ... end
Supported Arr functions
module Arr : sig ... end
Helper functions
include Owl_algodiff_graph_convert_sig.Sig with type t := t
val to_trace : t list -> string

to_trace [t0; t1; ...] outputs the trace of the computation graph on the terminal in a human-readable format.

val to_dot : t list -> string

to_dot [t0; t1; ...] outputs the trace of the computation graph in the dot file format, which you can pass to other tools such as Graphviz for further visualisation.
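
A sketch of dumping such a graph to a Graphviz file, again assuming Owl.Algodiff.D and using only functions listed in this signature (tag, make_reverse, reverse_prop, to_dot):

  module AD = Owl.Algodiff.D

  let () =
    let f x = AD.Maths.(sum' (x * sin x)) in
    let x = AD.make_reverse (AD.pack_arr (Owl.Arr.uniform [| 5 |])) (AD.tag ()) in
    let y = f x in
    AD.reverse_prop (AD.F 1.) y;              (* build the adjoint graph *)
    let oc = open_out "graph.dot" in
    output_string oc (AD.to_dot [ y ]);       (* render with: dot -Tpdf graph.dot *)
    close_out oc
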

val pp_num : Stdlib.Format.formatter -> t -> unit

pp_num t pretty prints the abstract number used in Algodiff.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Batch/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Batch/index.html deleted file mode 100644 index dead03027..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Batch/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Batch (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Batch)

Module Optimise.Batch

Batch module

type typ =
  1. | Full
  2. | Mini of int
  3. | Sample of int
  4. | Stochastic

Types of batches.

val run : typ -> Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t

Execute the computations defined in module typ.

val batches : typ -> Algodiff.t -> int

Return the total number of batches given a batch typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Checkpoint/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Checkpoint/index.html deleted file mode 100644 index cb1c495a1..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Checkpoint/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Checkpoint (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Checkpoint)

Module Optimise.Checkpoint

Checkpoint module

type state = {
  1. mutable current_batch : int;
  2. mutable batches_per_epoch : int;
  3. mutable epochs : float;
  4. mutable batches : int;
  5. mutable loss : Algodiff.t array;
  6. mutable start_at : float;
  7. mutable stop : bool;
  8. mutable gs : Algodiff.t array array;
  9. mutable ps : Algodiff.t array array;
  10. mutable us : Algodiff.t array array;
  11. mutable ch : Algodiff.t array array array;
}

Type definition of checkpoint

type typ =
  1. | Batch of int
  2. | Epoch of float
  3. | Custom of state -> unit
  4. | None

Batch type.

val init_state : int -> float -> state

init_state batches_per_epoch epochs initialises a state by specifying the number of batches per epoch and the number of epochs in total.

val default_checkpoint_fun : (string -> 'a) -> 'a

This function is used for saving intermediate files during optimisation.

val print_state_info : state -> unit

Print out the detail information of current state.

val print_summary : state -> unit

Print out the summary of current state.

val run : typ -> (string -> unit) -> int -> Algodiff.t -> state -> unit

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Clipping/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Clipping/index.html deleted file mode 100644 index 566b77af7..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Clipping/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Clipping (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Clipping)

Module Optimise.Clipping

Clipping module

type typ =
  1. | L2norm of float
  2. | Value of float * float
  3. | None

Types of clipping functions.

val run : typ -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Gradient/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Gradient/index.html deleted file mode 100644 index 5f6cdab79..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Gradient/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -Gradient (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Gradient)

Module Optimise.Gradient

Gradient module

type typ =
  1. | GD
  2. | CG
  3. | CD
  4. | NonlinearCG
  5. | DaiYuanCG
  6. | NewtonCG
  7. | Newton

Types of gradient function.

val run : typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Learning_Rate/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Learning_Rate/index.html deleted file mode 100644 index 7097c1a84..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Learning_Rate/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Learning_Rate (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Learning_Rate)

Module Optimise.Learning_Rate

Strategies for learning rate update

type typ =
  1. | Adagrad of float
  2. | Const of float
  3. | Decay of float * float
  4. | Exp_decay of float * float
  5. | RMSprop of float * float
  6. | Adam of float * float * float
  7. | Schedule of float array

Representation of learning rate update strategies. Possible values include:

  • Adam (alpha, beta1, beta2), see ref for parameter meaning
val run : typ -> int -> Algodiff.t -> Algodiff.t array -> Algodiff.t

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val update_ch : typ -> Algodiff.t -> Algodiff.t array -> Algodiff.t array

Update the cache of gradients.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Loss/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Loss/index.html deleted file mode 100644 index a72e07fbb..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Loss/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Loss (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Loss)

Module Optimise.Loss

Loss module

type typ =
  1. | Hinge
  2. | L1norm
  3. | L2norm
  4. | Quadratic
  5. | Cross_entropy
  6. | Custom of Algodiff.t -> Algodiff.t -> Algodiff.t

Types of loss functions.

val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Momentum/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Momentum/index.html deleted file mode 100644 index 0b45fbd68..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Momentum/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Momentum (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Momentum)

Module Optimise.Momentum

Momentum module

type typ =
  1. | Standard of float
  2. | Nesterov of float
  3. | None

Types of momentum functions.

val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Params/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Params/index.html deleted file mode 100644 index 440af8d93..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Params/index.html +++ /dev/null @@ -1,14 +0,0 @@ - -Params (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Params)

Module Optimise.Params

Params module

type typ = {
  1. mutable epochs : float;
  2. mutable batch : Batch.typ;
  3. mutable gradient : Gradient.typ;
  4. mutable loss : Loss.typ;
  5. mutable learning_rate : Learning_Rate.typ;
  6. mutable regularisation : Regularisation.typ;
  7. mutable momentum : Momentum.typ;
  8. mutable clipping : Clipping.typ;
  9. mutable stopping : Stopping.typ;
  10. mutable checkpoint : Checkpoint.typ;
  11. mutable verbosity : bool;
}

Type definition of parameter.

val default : unit -> typ

Create module typ with default values.

val config : ?batch:Batch.typ -> ?gradient:Gradient.typ -> ?loss:Loss.typ -> ?learning_rate:Learning_Rate.typ -> ?regularisation:Regularisation.typ -> ?momentum:Momentum.typ -> ?clipping:Clipping.typ -> ?stopping:Stopping.typ -> ?checkpoint:Checkpoint.typ -> ?verbosity:bool -> float -> typ

This function creates a parameter object with many configurations.
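
A typical configuration, in the style of Owl's neural-network examples (the module paths Owl, Neural.S and Neural.S.Graph, and the names network, x and y in the final comment, are assumptions; any field left out keeps its default value):

  open Owl
  open Neural.S
  open Neural.S.Graph

  let params =
    Params.config
      ~batch:(Batch.Mini 100)
      ~loss:Loss.Cross_entropy
      ~learning_rate:(Learning_Rate.Adagrad 0.005)
      0.1                                  (* train for 0.1 epoch *)

  (* then e.g. Graph.train ~params network x y *)
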

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Regularisation/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Regularisation/index.html deleted file mode 100644 index 5c55e0099..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Regularisation/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Regularisation (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Regularisation)

Module Optimise.Regularisation

Regularisation module

type typ =
  1. | L1norm of float
  2. | L2norm of float
  3. | Elastic_net of float * float
  4. | None

Types of regularisation functions.

val run : typ -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Stopping/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Stopping/index.html deleted file mode 100644 index 3ad1a165d..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Stopping/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Stopping (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Stopping)

Module Optimise.Stopping

Stopping module

type typ =
  1. | Const of float
  2. | Early of int * int
  3. | None

Types of stopping functions.

val run : typ -> float -> bool

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Utils/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Utils/index.html deleted file mode 100644 index 515b31ed9..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/Utils/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Utils (owl-base.Owl_neural_neuron_sig.Sig.Optimise.Utils)

Module Optimise.Utils

Utils module

val sample_num : Algodiff.t -> int

Return the total number of samples in the passed-in ndarray.

val draw_samples : Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t

draw_samples x y draws samples from both x (observations) and y (labels). The samples are drawn along axis 0, so x and y must agree along axis 0.

val get_chunk : Algodiff.t -> Algodiff.t -> int -> int -> Algodiff.t * Algodiff.t

get_chunk x y i c gets a contiguous chunk of c samples starting at position i from x (observations) and y (labels).
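
For instance, a sketch with O assumed to be a concrete instance of this Optimise signature (such as Owl.Optimise.D in the main owl package):

  module O = Owl.Optimise.D      (* assumed instance of this signature *)
  module AD = O.Algodiff

  let x = AD.pack_arr (Owl.Arr.uniform [| 100; 10 |])   (* 100 observations *)
  let y = AD.pack_arr (Owl.Arr.uniform [| 100; 1 |])    (* 100 labels *)

  let n = O.Utils.sample_num x                (* 100 *)
  let xb, yb = O.Utils.draw_samples x y 32    (* random batch of 32 rows *)
  let xc, yc = O.Utils.get_chunk x y 10 32    (* 32 consecutive rows starting at 10 *)
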

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/index.html deleted file mode 100644 index 210927fc4..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Optimise/index.html +++ /dev/null @@ -1,31 +0,0 @@ - -Optimise (owl-base.Owl_neural_neuron_sig.Sig.Optimise)

Module Sig.Optimise

module Utils : sig ... end

Utils module

module Learning_Rate : sig ... end

Strategies for learning rate update

module Batch : sig ... end

Batch module

module Loss : sig ... end

Loss module

module Gradient : sig ... end

Gradient module

module Momentum : sig ... end

Momentum module

module Regularisation : sig ... end

Regularisation module

module Clipping : sig ... end

Clipping module

module Stopping : sig ... end

Stopping module

module Checkpoint : sig ... end

Checkpoint module

module Params : sig ... end

Params module

Core functions
val minimise_weight : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Checkpoint.state * Algodiff.t

This function minimises the weight w of the passed-in function f.

  • f is a function f : w -> x -> y.
  • w is a row vector but y can have any shape.

val minimise_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t * Algodiff.t array array) -> (Algodiff.t -> Algodiff.t array array * Algodiff.t array array) -> (Algodiff.t array array -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state

This function is specifically designed for minimising the weights in a neural network of graph structure. In Owl's earlier versions, the functions in the regression module were actually implemented using this function.

val minimise_fun : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Checkpoint.state * Algodiff.t

This function minimises f : x -> y w.r.t x.

x is an ndarray, and y is a scalar value.
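
A sketch of minimising a simple quadratic with minimise_fun, under the same assumption about the concrete Optimise instance (here written as Owl.Optimise.D):

  module O = Owl.Optimise.D      (* assumed instance of this signature *)
  module AD = O.Algodiff

  (* f x = || x - 3 ||^2, minimised when every entry of x equals 3 *)
  let f x = AD.Maths.(l2norm_sqr' (x - AD.pack_flt 3.))

  let x0 = AD.pack_arr (Owl.Arr.zeros [| 1; 5 |])
  let params = O.Params.config ~learning_rate:(O.Learning_Rate.Const 0.1) 100.
  let _state, x_min = O.minimise_fun params f x0
  let () = AD.unpack_arr x_min |> Owl.Arr.print
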

val minimise_compiled_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> (unit -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state

TODO

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Padding1D/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Padding1D/index.html deleted file mode 100644 index 10e9dff1b..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Padding1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Padding1D (owl-base.Owl_neural_neuron_sig.Sig.Padding1D)

Module Sig.Padding1D

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Padding2D/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Padding2D/index.html deleted file mode 100644 index ab89c489a..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Padding2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Padding2D (owl-base.Owl_neural_neuron_sig.Sig.Padding2D)

Module Sig.Padding2D

type neuron_typ = {
  1. mutable padding : int array array;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : int array array -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Padding3D/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Padding3D/index.html deleted file mode 100644 index 6810a331d..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Padding3D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Padding3D (owl-base.Owl_neural_neuron_sig.Sig.Padding3D)

Module Sig.Padding3D

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Recurrent/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Recurrent/index.html deleted file mode 100644 index 7136fb4d7..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Recurrent/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -Recurrent (owl-base.Owl_neural_neuron_sig.Sig.Recurrent)

Module Sig.Recurrent

type neuron_typ = {
  1. mutable whh : Optimise.Algodiff.t;
  2. mutable wxh : Optimise.Algodiff.t;
  3. mutable why : Optimise.Algodiff.t;
  4. mutable bh : Optimise.Algodiff.t;
  5. mutable by : Optimise.Algodiff.t;
  6. mutable h : Optimise.Algodiff.t;
  7. mutable hiddens : int;
  8. mutable act : Activation.typ;
  9. mutable init_typ : Init.typ;
  10. mutable in_shape : int array;
  11. mutable out_shape : int array;
}

Neuron type definition.

val create : ?time_steps:int -> ?inputs:int -> int -> int -> Activation.typ -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Reshape/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Reshape/index.html deleted file mode 100644 index 0bcd4fbf1..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Reshape/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Reshape (owl-base.Owl_neural_neuron_sig.Sig.Reshape)

Module Sig.Reshape

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> int array -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Slice/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/Slice/index.html deleted file mode 100644 index bc75eccbb..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/Slice/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Slice (owl-base.Owl_neural_neuron_sig.Sig.Slice)

Module Sig.Slice

type neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
  3. mutable slice : int list list;
}

Neuron type definition.

val create : int list list -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/TransposeConv1D/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/TransposeConv1D/index.html deleted file mode 100644 index 60b96737b..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/TransposeConv1D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -TransposeConv1D (owl-base.Owl_neural_neuron_sig.Sig.TransposeConv1D)

Module Sig.TransposeConv1D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/TransposeConv2D/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/TransposeConv2D/index.html deleted file mode 100644 index a30d4c510..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/TransposeConv2D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -TransposeConv2D (owl-base.Owl_neural_neuron_sig.Sig.TransposeConv2D)

Module Sig.TransposeConv2D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/TransposeConv3D/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/TransposeConv3D/index.html deleted file mode 100644 index e2a389d1f..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/TransposeConv3D/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -TransposeConv3D (owl-base.Owl_neural_neuron_sig.Sig.TransposeConv3D)

Module Sig.TransposeConv3D

type neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}

Neuron type definition.

val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val init : neuron_typ -> unit

Initialise the neuron and its parameters.

val reset : neuron_typ -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron_typ -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron_typ -> Optimise.Algodiff.t array

Assemble all the parameters in an array, used by Optimise module.

val mkpri : neuron_typ -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron_typ -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron_typ -> Optimise.Algodiff.t array -> unit

Update parameters in a neuron, used by Optimise module.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/UpSampling1D/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/UpSampling1D/index.html deleted file mode 100644 index b553e4940..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/UpSampling1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -UpSampling1D (owl-base.Owl_neural_neuron_sig.Sig.UpSampling1D)

Module Sig.UpSampling1D

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/UpSampling2D/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/UpSampling2D/index.html deleted file mode 100644 index 769a36254..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/UpSampling2D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -UpSampling2D (owl-base.Owl_neural_neuron_sig.Sig.UpSampling2D)

Module Sig.UpSampling2D

type neuron_typ = {
  1. mutable size : int array;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}

Neuron type definition.

val create : int array -> neuron_typ

Create the neuron.

val connect : int array -> neuron_typ -> unit

Connect this neuron to others in a neural network.

val copy : neuron_typ -> neuron_typ

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron_typ -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : unit -> string

Return the name of the neuron.

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/UpSampling3D/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/UpSampling3D/index.html deleted file mode 100644 index c43162d9f..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/UpSampling3D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -UpSampling3D (owl-base.Owl_neural_neuron_sig.Sig.UpSampling3D)

Module Sig.UpSampling3D

\ No newline at end of file diff --git a/owl-base/Owl_neural_neuron_sig/module-type-Sig/index.html b/owl-base/Owl_neural_neuron_sig/module-type-Sig/index.html deleted file mode 100644 index 8413aa8a1..000000000 --- a/owl-base/Owl_neural_neuron_sig/module-type-Sig/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Sig (owl-base.Owl_neural_neuron_sig.Sig)

Module type Owl_neural_neuron_sig.Sig

Init neuron
module Init : sig ... end
Input neuron
module Input : sig ... end
Activation neuron
module Activation : sig ... end
Linear neuron
module Linear : sig ... end
LinearNoBias neuron
module LinearNoBias : sig ... end
Recurrent neuron
module Recurrent : sig ... end
LSTM neuron
module LSTM : sig ... end
GRU neuron
module GRU : sig ... end
Conv1D neuron
module Conv1D : sig ... end
Conv2D neuron
module Conv2D : sig ... end
Conv3D neuron
module Conv3D : sig ... end
DilatedConv1D neuron
module DilatedConv1D : sig ... end
DilatedConv2D neuron
module DilatedConv2D : sig ... end
DilatedConv3D neuron
module DilatedConv3D : sig ... end
TransposeConv1D neuron
module TransposeConv1D : sig ... end
TransposeConv2D neuron
module TransposeConv2D : sig ... end
TransposeConv3D neuron
module TransposeConv3D : sig ... end
FullyConnected neuron
module FullyConnected : sig ... end
MaxPool1D neuron
module MaxPool1D : sig ... end
MaxPool2D neuron
module MaxPool2D : sig ... end
AvgPool1D neuron
module AvgPool1D : sig ... end
AvgPool2D neuron
module AvgPool2D : sig ... end
GlobalMaxPool1D neuron
module GlobalMaxPool1D : sig ... end
GlobalMaxPool2D neuron
module GlobalMaxPool2D : sig ... end
GlobalAvgPool1D neuron
module GlobalAvgPool1D : sig ... end
GlobalAvgPool2D neuron
module GlobalAvgPool2D : sig ... end
UpSampling1D neuron
module UpSampling1D : sig ... end
UpSampling2D neuron
module UpSampling2D : sig ... end
UpSampling3D neuron
module UpSampling3D : sig ... end
Padding1D neuron
module Padding1D : sig ... end
Padding2D neuron
module Padding2D : sig ... end
Padding3D neuron
module Padding3D : sig ... end
Lambda neuron
module Lambda : sig ... end
LambdaArray neuron
module LambdaArray : sig ... end
Dropout neuron
module Dropout : sig ... end
Reshape neuron
module Reshape : sig ... end
Flatten neuron
module Flatten : sig ... end
Slice neuron
module Slice : sig ... end
Add neuron
module Add : sig ... end
Mul neuron
module Mul : sig ... end
Dot neuron
module Dot : sig ... end
Max neuron
module Max : sig ... end
Average neuron
module Average : sig ... end
Concatenate neuron
module Concatenate : sig ... end
Normalisation neuron
module Normalisation : sig ... end
GaussianNoise neuron
module GaussianNoise : sig ... end
GaussianDropout neuron
module GaussianDropout : sig ... end
AlphaDropout neuron
module AlphaDropout : sig ... end
Embedding neuron
module Embedding : sig ... end
Masking neuron
module Masking : sig ... end
Core functions
type neuron =
  1. | Input of Input.neuron_typ
  2. | Linear of Linear.neuron_typ
  3. | LinearNoBias of LinearNoBias.neuron_typ
  4. | Embedding of Embedding.neuron_typ
  5. | LSTM of LSTM.neuron_typ
  6. | GRU of GRU.neuron_typ
  7. | Recurrent of Recurrent.neuron_typ
  8. | Conv1D of Conv1D.neuron_typ
  9. | Conv2D of Conv2D.neuron_typ
  10. | Conv3D of Conv3D.neuron_typ
  11. | DilatedConv1D of DilatedConv1D.neuron_typ
  12. | DilatedConv2D of DilatedConv2D.neuron_typ
  13. | DilatedConv3D of DilatedConv3D.neuron_typ
  14. | TransposeConv1D of TransposeConv1D.neuron_typ
  15. | TransposeConv2D of TransposeConv2D.neuron_typ
  16. | TransposeConv3D of TransposeConv3D.neuron_typ
  17. | FullyConnected of FullyConnected.neuron_typ
  18. | MaxPool1D of MaxPool1D.neuron_typ
  19. | MaxPool2D of MaxPool2D.neuron_typ
  20. | AvgPool1D of AvgPool1D.neuron_typ
  21. | AvgPool2D of AvgPool2D.neuron_typ
  22. | GlobalMaxPool1D of GlobalMaxPool1D.neuron_typ
  23. | GlobalMaxPool2D of GlobalMaxPool2D.neuron_typ
  24. | GlobalAvgPool1D of GlobalAvgPool1D.neuron_typ
  25. | GlobalAvgPool2D of GlobalAvgPool2D.neuron_typ
  26. | UpSampling2D of UpSampling2D.neuron_typ
  27. | Padding2D of Padding2D.neuron_typ
  28. | Dropout of Dropout.neuron_typ
  29. | Reshape of Reshape.neuron_typ
  30. | Flatten of Flatten.neuron_typ
  31. | Slice of Slice.neuron_typ
  32. | Lambda of Lambda.neuron_typ
  33. | LambdaArray of LambdaArray.neuron_typ
  34. | Activation of Activation.neuron_typ
  35. | GaussianNoise of GaussianNoise.neuron_typ
  36. | GaussianDropout of GaussianDropout.neuron_typ
  37. | AlphaDropout of AlphaDropout.neuron_typ
  38. | Normalisation of Normalisation.neuron_typ
  39. | Add of Add.neuron_typ
  40. | Mul of Mul.neuron_typ
  41. | Dot of Dot.neuron_typ
  42. | Max of Max.neuron_typ
  43. | Average of Average.neuron_typ
  44. | Concatenate of Concatenate.neuron_typ
    (*

    Types of neuron.

    *)
val get_in_out_shape : neuron -> int array * int array

Get both input and output shapes of a neuron.

val get_in_shape : neuron -> int array

Get the input shape of a neuron.

val get_out_shape : neuron -> int array

Get the output shape of a neuron.

val connect : int array array -> neuron -> unit

Connect this neuron to others in a neural network.

val init : neuron -> unit

Initialise the neuron and its parameters.

val reset : neuron -> unit

Reset the parameters in a neuron.

val mktag : int -> neuron -> unit

Tag the neuron, used by Algodiff module.

val mkpar : neuron -> Optimise.Algodiff.t array

Assemble all the trainable parameters in an array, used by Optimise module.

val mkpri : neuron -> Optimise.Algodiff.t array

Assemble all the primal values in an array, used by Optimise module.

val mkadj : neuron -> Optimise.Algodiff.t array

Assemble all the adjoint values in an array, used by Optimise module.

val update : neuron -> Optimise.Algodiff.t array -> unit

Update trainable parameters in a neuron, used by Optimise module.

val load_weights : neuron -> Optimise.Algodiff.t array -> unit

Load both trainable and non-trainable parameters into the neuron.

val save_weights : neuron -> Optimise.Algodiff.t array

Assemble both trainable and non-trainable parameters of the neuron.

val copy : neuron -> neuron

Make a deep copy of the neuron and its parameters.

Execute the computation in this neuron.

val to_string : neuron -> string

Convert the neuron to its string representation. The string is often a summary of the parameters defined in the neuron.

val to_name : neuron -> string

Return the name of the neuron.
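
As a quick orientation, the accessors above compose into small utilities. A hedged sketch, assuming it is written where a module implementing this Sig is open (so neuron, get_in_out_shape and to_name are in scope):

(* Summarise any neuron using only functions documented in this signature. *)
let describe_neuron (n : neuron) =
  let show a =
    a |> Array.to_list |> List.map string_of_int |> String.concat ";"
  in
  let i, o = get_in_out_shape n in
  Printf.sprintf "%s: in=[%s] out=[%s]" (to_name n) (show i) (show o)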

\ No newline at end of file diff --git a/owl-base/Owl_numdiff_generic/.dummy b/owl-base/Owl_numdiff_generic/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_numdiff_generic/Make/argument-1-A/index.html b/owl-base/Owl_numdiff_generic/Make/argument-1-A/index.html deleted file mode 100644 index 10e353954..000000000 --- a/owl-base/Owl_numdiff_generic/Make/argument-1-A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl-base.Owl_numdiff_generic.Make.A)

Parameter Make.A

include Owl_types_ndarray_numdiff.Sig with type elt = float
include Owl_types_ndarray_basic.Sig with type elt = float
type arr
type elt = float
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val mapi : (int -> elt -> elt) -> arr -> arr
val (+) : arr -> arr -> arr
val (-) : arr -> arr -> arr
val (*) : arr -> arr -> arr
val (/) : arr -> arr -> arr
val (+$) : arr -> elt -> arr
val (-$) : arr -> elt -> arr
val (*$) : arr -> elt -> arr
val (/$) : arr -> elt -> arr
\ No newline at end of file diff --git a/owl-base/Owl_numdiff_generic/Make/index.html b/owl-base/Owl_numdiff_generic/Make/index.html deleted file mode 100644 index 99d157ea7..000000000 --- a/owl-base/Owl_numdiff_generic/Make/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Make (owl-base.Owl_numdiff_generic.Make)

Module Owl_numdiff_generic.Make

Parameters

module A : Owl_types.Ndarray_Numdiff with type elt = float

Signature

type arr = A.arr
type elt = A.elt
val _eps : float
val _ep1 : float
val _ep2 : float
val diff : (float -> float) -> float -> float
val diff' : (float -> float) -> float -> float * float
val diff2 : (float -> float) -> float -> float
val diff2' : (float -> float) -> float -> float * float
val grad' : (A.arr -> A.elt) -> A.arr -> A.arr * A.arr
val grad : (A.arr -> A.elt) -> A.arr -> A.arr
val jacobianT' : (A.arr -> A.arr) -> A.arr -> A.arr * A.arr
val jacobianT : (A.arr -> A.arr) -> A.arr -> A.arr
val jacobian' : (A.arr -> A.arr) -> A.arr -> A.arr * A.arr
val jacobian : (A.arr -> A.arr) -> A.arr -> A.arr
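
For orientation, the functor above only needs an ndarray module satisfying Owl_types.Ndarray_Numdiff with type elt = float. A hedged sketch; using Owl.Dense.Ndarray.D from the full owl package as the argument A is an assumption, not something stated on this page:

(* Instantiate the numerical-differentiation functor and take a derivative. *)
module N = Owl_numdiff_generic.Make (Owl.Dense.Ndarray.D)

let () =
  (* diff : (float -> float) -> float -> float *)
  let d = N.diff sin (Float.pi /. 4.) in
  Printf.printf "d/dx sin x at pi/4 ~ %g (expect ~0.7071)\n" d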
\ No newline at end of file diff --git a/owl-base/Owl_numdiff_generic_sig/.dummy b/owl-base/Owl_numdiff_generic_sig/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_numdiff_generic_sig/Impl/argument-1-A/index.html b/owl-base/Owl_numdiff_generic_sig/Impl/argument-1-A/index.html deleted file mode 100644 index 7512664e9..000000000 --- a/owl-base/Owl_numdiff_generic_sig/Impl/argument-1-A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl-base.Owl_numdiff_generic_sig.Impl.A)

Parameter Impl.A

include Owl_types_ndarray_numdiff.Sig with type elt = float
include Owl_types_ndarray_basic.Sig with type elt = float
type arr
type elt = float
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val mapi : (int -> elt -> elt) -> arr -> arr
val (+) : arr -> arr -> arr
val (-) : arr -> arr -> arr
val (*) : arr -> arr -> arr
val (/) : arr -> arr -> arr
val (+$) : arr -> elt -> arr
val (-$) : arr -> elt -> arr
val (*$) : arr -> elt -> arr
val (/$) : arr -> elt -> arr
\ No newline at end of file diff --git a/owl-base/Owl_numdiff_generic_sig/Impl/index.html b/owl-base/Owl_numdiff_generic_sig/Impl/index.html deleted file mode 100644 index c58b07804..000000000 --- a/owl-base/Owl_numdiff_generic_sig/Impl/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Impl (owl-base.Owl_numdiff_generic_sig.Impl)

Module Owl_numdiff_generic_sig.Impl

Parameters

module A : Owl_types.Ndarray_Numdiff with type elt = float

Signature

Type definition
type arr

General ndarray type

type elt

Scalar type

Basic functions
val diff : (elt -> elt) -> elt -> elt

derivative of f : scalar -> scalar.

val diff' : (elt -> elt) -> elt -> elt * elt

derivative of f : scalar -> scalar, return both f x and f' x.

val diff2 : (elt -> elt) -> elt -> elt

second order derivative of f : float -> float.

val diff2' : (elt -> elt) -> elt -> elt * elt

second order derivative of f : float -> float, return f x and f' x.

val grad : (arr -> elt) -> arr -> arr

gradient of f : vector -> scalar.

val grad' : (arr -> elt) -> arr -> arr * arr

gradient of f : vector -> scalar, return f x and g x.

val jacobian : (arr -> arr) -> arr -> arr

jacobian of f : vector -> vector.

val jacobian' : (arr -> arr) -> arr -> arr * arr

jacobian of f : vector -> vector, return f x and j x.

val jacobianT : (arr -> arr) -> arr -> arr

transposed jacobian of f : vector -> vector.

val jacobianT' : (arr -> arr) -> arr -> arr * arr

transposed jacobian of f : vector -> vector, return f x and j x.

\ No newline at end of file diff --git a/owl-base/Owl_numdiff_generic_sig/module-type-Sig/index.html b/owl-base/Owl_numdiff_generic_sig/module-type-Sig/index.html deleted file mode 100644 index 5cb87c4cc..000000000 --- a/owl-base/Owl_numdiff_generic_sig/module-type-Sig/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Sig (owl-base.Owl_numdiff_generic_sig.Sig)

Module type Owl_numdiff_generic_sig.Sig

Type definition
type arr

General ndarray type

type elt

Scalar type

Basic functions
val diff : (elt -> elt) -> elt -> elt

derivative of f : scalar -> scalar.

val diff' : (elt -> elt) -> elt -> elt * elt

derivative of f : scalar -> scalar, return both f x and f' x.

val diff2 : (elt -> elt) -> elt -> elt

second order derivative of f : float -> float.

val diff2' : (elt -> elt) -> elt -> elt * elt

second order derivative of f : float -> float, return f x and f' x.

val grad : (arr -> elt) -> arr -> arr

gradient of f : vector -> scalar.

val grad' : (arr -> elt) -> arr -> arr * arr

gradient of f : vector -> scalar, return f x and g x.

val jacobian : (arr -> arr) -> arr -> arr

jacobian of f : vector -> vector.

val jacobian' : (arr -> arr) -> arr -> arr * arr

jacobian of f : vector -> vector, return f x and j x.

val jacobianT : (arr -> arr) -> arr -> arr

transposed jacobian of f : vector -> vector.

val jacobianT' : (arr -> arr) -> arr -> arr * arr

transposed jacobian of f : vector -> vector, return f x and j x.
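
To make the grad and jacobianT descriptions concrete, a hedged sketch; the concrete module N (built from Owl_numdiff_generic.Make over Owl.Dense.Ndarray.D of the full owl package) is an assumption, not part of this signature:

(* Gradient of a vector -> scalar function and a transposed Jacobian. *)
module A = Owl.Dense.Ndarray.D
module N = Owl_numdiff_generic.Make (A)

let () =
  let f x = A.sum' (A.sqr x) in        (* f x = sum (x^2)            *)
  let x = A.ones [| 3 |] in
  A.print (N.grad f x);                (* numerical gradient ~ 2x    *)
  A.print (N.jacobianT A.sin x)        (* transposed Jacobian of sin *)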

\ No newline at end of file diff --git a/owl-base/Owl_operator/.dummy b/owl-base/Owl_operator/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_operator/Make_Basic/argument-1-M/index.html b/owl-base/Owl_operator/Make_Basic/argument-1-M/index.html deleted file mode 100644 index 813fda53e..000000000 --- a/owl-base/Owl_operator/Make_Basic/argument-1-M/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -M (owl-base.Owl_operator.Make_Basic.M)

Parameter Make_Basic.M

type ('a, 'b) t
val add : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
val sub : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
val mul : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
val div : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
val add_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t
val sub_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t
val mul_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t
val div_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t
val scalar_add : 'a -> ('a, 'b) t -> ('a, 'b) t
val scalar_sub : 'a -> ('a, 'b) t -> ('a, 'b) t
val scalar_mul : 'a -> ('a, 'b) t -> ('a, 'b) t
val scalar_div : 'a -> ('a, 'b) t -> ('a, 'b) t
val equal : ('a, 'b) t -> ('a, 'b) t -> bool
val not_equal : ('a, 'b) t -> ('a, 'b) t -> bool
val greater : ('a, 'b) t -> ('a, 'b) t -> bool
val less : ('a, 'b) t -> ('a, 'b) t -> bool
val greater_equal : ('a, 'b) t -> ('a, 'b) t -> bool
val less_equal : ('a, 'b) t -> ('a, 'b) t -> bool
\ No newline at end of file diff --git a/owl-base/Owl_operator/Make_Basic/index.html b/owl-base/Owl_operator/Make_Basic/index.html deleted file mode 100644 index c602b4e38..000000000 --- a/owl-base/Owl_operator/Make_Basic/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Make_Basic (owl-base.Owl_operator.Make_Basic)

Module Owl_operator.Make_Basic

Parameters

Signature

val (+) : ('a, 'b) M.t -> ('a, 'b) M.t -> ('a, 'b) M.t

Operator of add

val (-) : ('a, 'b) M.t -> ('a, 'b) M.t -> ('a, 'b) M.t

Operator of sub

val (*) : ('a, 'b) M.t -> ('a, 'b) M.t -> ('a, 'b) M.t

Operator of mul

val (/) : ('a, 'b) M.t -> ('a, 'b) M.t -> ('a, 'b) M.t

Operator of div

val (+$) : ('a, 'b) M.t -> 'a -> ('a, 'b) M.t

Operator of add_scalar

val (-$) : ('a, 'b) M.t -> 'a -> ('a, 'b) M.t

Operator of sub_scalar

val (*$) : ('a, 'b) M.t -> 'a -> ('a, 'b) M.t

Operator of mul_scalar

val (/$) : ('a, 'b) M.t -> 'a -> ('a, 'b) M.t

Operator of div_scalar

val ($+) : 'a -> ('a, 'b) M.t -> ('a, 'b) M.t

Operator of scalar_add

val ($-) : 'a -> ('a, 'b) M.t -> ('a, 'b) M.t

Operator of scalar_sub

val ($*) : 'a -> ('a, 'b) M.t -> ('a, 'b) M.t

Operator of scalar_mul

val ($/) : 'a -> ('a, 'b) M.t -> ('a, 'b) M.t

Operator of scalar_div

val (=) : ('a, 'b) M.t -> ('a, 'b) M.t -> bool

Operator of equal

val (!=) : ('a, 'b) M.t -> ('a, 'b) M.t -> bool

Operator of not_equal

val (<>) : ('a, 'b) M.t -> ('a, 'b) M.t -> bool

Operator of not_equal

val (>) : ('a, 'b) M.t -> ('a, 'b) M.t -> bool

Operator of greater

val (<) : ('a, 'b) M.t -> ('a, 'b) M.t -> bool

Operator of less

val (>=) : ('a, 'b) M.t -> ('a, 'b) M.t -> bool

Operator of greater_equal

val (<=) : ('a, 'b) M.t -> ('a, 'b) M.t -> bool

Operator of less_equal
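
Owl instantiates this functor for its own containers, so the derived operators show up on, for example, dense matrices. A hedged usage sketch; the assumption is that Owl.Mat from the full owl package includes the operators generated here:

let () =
  let open Owl in
  let a = Mat.uniform 2 3 in
  let b = Mat.uniform 2 3 in
  Mat.print Mat.(a + b);                   (* add        *)
  Mat.print Mat.(a *$ 2.);                 (* mul_scalar *)
  Printf.printf "equal: %b\n" Mat.(a = a)  (* equal      *)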

\ No newline at end of file diff --git a/owl-base/Owl_operator/Make_Extend/argument-1-M/index.html b/owl-base/Owl_operator/Make_Extend/argument-1-M/index.html deleted file mode 100644 index e335f3a43..000000000 --- a/owl-base/Owl_operator/Make_Extend/argument-1-M/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -M (owl-base.Owl_operator.Make_Extend.M)

Parameter Make_Extend.M

type ('a, 'b) t
val equal_scalar : ('a, 'b) t -> 'a -> bool
val not_equal_scalar : ('a, 'b) t -> 'a -> bool
val less_scalar : ('a, 'b) t -> 'a -> bool
val greater_scalar : ('a, 'b) t -> 'a -> bool
val less_equal_scalar : ('a, 'b) t -> 'a -> bool
val greater_equal_scalar : ('a, 'b) t -> 'a -> bool
val elt_equal : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
val elt_not_equal : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
val elt_less : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
val elt_greater : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
val elt_less_equal : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
val elt_greater_equal : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
val elt_equal_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t
val elt_not_equal_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t
val elt_less_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t
val elt_greater_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t
val elt_less_equal_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t
val elt_greater_equal_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t
val fmod : (float, 'a) t -> (float, 'a) t -> (float, 'a) t
val fmod_scalar : (float, 'a) t -> float -> (float, 'a) t
val pow : (float, 'a) t -> (float, 'a) t -> (float, 'a) t
val scalar_pow : float -> (float, 'a) t -> (float, 'a) t
val pow_scalar : (float, 'a) t -> float -> (float, 'a) t
val approx_equal : ?eps:float -> ('a, 'b) t -> ('a, 'b) t -> bool
val approx_equal_scalar : ?eps:float -> ('a, 'b) t -> 'a -> bool
val approx_elt_equal : ?eps:float -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
val approx_elt_equal_scalar : ?eps:float -> ('a, 'b) t -> 'a -> ('a, 'b) t
val add_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit
val sub_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit
val mul_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit
val div_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit
val add_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit
val sub_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit
val mul_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit
val div_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit
val concat_vertical : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
val concat_horizontal : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
val get_fancy_ext : Owl_types.index array -> ('a, 'b) t -> ('a, 'b) t
val set_fancy_ext : Owl_types.index array -> ('a, 'b) t -> ('a, 'b) t -> unit
val get_slice_ext : int list array -> ('a, 'b) t -> ('a, 'b) t
val set_slice_ext : int list array -> ('a, 'b) t -> ('a, 'b) t -> unit
\ No newline at end of file diff --git a/owl-base/Owl_operator/Make_Extend/index.html b/owl-base/Owl_operator/Make_Extend/index.html deleted file mode 100644 index 8d27e21b0..000000000 --- a/owl-base/Owl_operator/Make_Extend/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Make_Extend (owl-base.Owl_operator.Make_Extend)

Module Owl_operator.Make_Extend

Parameters

Signature

val (=$) : ('a, 'b) M.t -> 'a -> bool

Operator of equal_scalar

val (!=$) : ('a, 'b) M.t -> 'a -> bool

Operator of not_equal_scalar

val (<>$) : ('a, 'b) M.t -> 'a -> bool

Operator of not_equal_scalar

val (<$) : ('a, 'b) M.t -> 'a -> bool

Operator of less_scalar

val (>$) : ('a, 'b) M.t -> 'a -> bool

Operator of greater_scalar

val (<=$) : ('a, 'b) M.t -> 'a -> bool

Operator of less_equal_scalar

val (>=$) : ('a, 'b) M.t -> 'a -> bool

Operator of greater_equal_scalar

val (=.) : ('a, 'b) M.t -> ('a, 'b) M.t -> ('a, 'b) M.t

Operator of elt_equal

val (!=.) : ('a, 'b) M.t -> ('a, 'b) M.t -> ('a, 'b) M.t

Operator of elt_not_equal

val (<>.) : ('a, 'b) M.t -> ('a, 'b) M.t -> ('a, 'b) M.t

Operator of elt_not_equal

val (<.) : ('a, 'b) M.t -> ('a, 'b) M.t -> ('a, 'b) M.t

Operator of elt_less

val (>.) : ('a, 'b) M.t -> ('a, 'b) M.t -> ('a, 'b) M.t

Operator of elt_greater

val (<=.) : ('a, 'b) M.t -> ('a, 'b) M.t -> ('a, 'b) M.t

Operator of elt_less_equal

val (>=.) : ('a, 'b) M.t -> ('a, 'b) M.t -> ('a, 'b) M.t

Operator of elt_greater_equal

val (=.$) : ('a, 'b) M.t -> 'a -> ('a, 'b) M.t

Operator of elt_equal_scalar

val (!=.$) : ('a, 'b) M.t -> 'a -> ('a, 'b) M.t

Operator of elt_not_equal_scalar

val (<>.$) : ('a, 'b) M.t -> 'a -> ('a, 'b) M.t

Operator of elt_not_equal_scalar

val (<.$) : ('a, 'b) M.t -> 'a -> ('a, 'b) M.t

Operator of elt_less_scalar

val (>.$) : ('a, 'b) M.t -> 'a -> ('a, 'b) M.t

Operator of elt_greater_scalar

val (<=.$) : ('a, 'b) M.t -> 'a -> ('a, 'b) M.t

Operator of elt_less_equal_scalar

val (>=.$) : ('a, 'b) M.t -> 'a -> ('a, 'b) M.t

Operator of elt_greater_equal_scalar

val (=~) : ?eps:float -> ('a, 'b) M.t -> ('a, 'b) M.t -> bool

Operator of approx_equal

val (=~$) : ?eps:float -> ('a, 'b) M.t -> 'a -> bool

Operator of approx_equal_scalar

val (=~.) : ?eps:float -> ('a, 'b) M.t -> ('a, 'b) M.t -> ('a, 'b) M.t

Operator of approx_elt_equal

val (=~.$) : ?eps:float -> ('a, 'b) M.t -> 'a -> ('a, 'b) M.t

Operator of approx_elt_equal_scalar

val (%) : (float, 'a) M.t -> (float, 'a) M.t -> (float, 'a) M.t

Operator of fmod

val (%$) : (float, 'a) M.t -> float -> (float, 'a) M.t

Operator of fmod_scalar

val (**) : (float, 'a) M.t -> (float, 'a) M.t -> (float, 'a) M.t

Operator of pow

val ($**) : float -> (float, 'a) M.t -> (float, 'a) M.t

Operator of scalar_pow

val (**$) : (float, 'a) M.t -> float -> (float, 'a) M.t

Operator of pow_scalar

val (+=) : ('a, 'b) M.t -> ('a, 'b) M.t -> unit

Operator of add_

val (-=) : ('a, 'b) M.t -> ('a, 'b) M.t -> unit

Operator of sub_

val (*=) : ('a, 'b) M.t -> ('a, 'b) M.t -> unit

Operator of mul_

val (/=) : ('a, 'b) M.t -> ('a, 'b) M.t -> unit

Operator of div_

val (+$=) : ('a, 'b) M.t -> 'a -> unit

Operator of add_scalar_

val (-$=) : ('a, 'b) M.t -> 'a -> unit

Operator of sub_scalar_

val (*$=) : ('a, 'b) M.t -> 'a -> unit

Operator of mul_scalar_

val (/$=) : ('a, 'b) M.t -> 'a -> unit

Operator of div_scalar_

val (@=) : ('a, 'b) M.t -> ('a, 'b) M.t -> ('a, 'b) M.t

Operator of concat_vertical

val (@||) : ('a, 'b) M.t -> ('a, 'b) M.t -> ('a, 'b) M.t

Operator of concat_horizontal

val (.!{;..}) : ('a, 'b) M.t -> Owl_types.index array -> ('a, 'b) M.t

Operator of get_fancy

val (.!{;..}<-) : ('a, 'b) M.t -> Owl_types.index array -> ('a, 'b) M.t -> unit

Operator of set_fancy

val (.${}) : ('a, 'b) M.t -> int list -> ('a, 'b) M.t
val (.${;..}) : ('a, 'b) M.t -> int list array -> ('a, 'b) M.t

Operator of get_slice

val (.${}<-) : ('a, 'b) M.t -> int list -> ('a, 'b) M.t -> unit
val (.${;..}<-) : ('a, 'b) M.t -> int list array -> ('a, 'b) M.t -> unit

Operator of set_slice
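
A hedged usage sketch of the extended operators; again the assumption is that Owl.Mat from the full owl package includes them:

let () =
  let open Owl in
  let x = Mat.sequential 2 3 in            (* 0 1 2 / 3 4 5                  *)
  let mask = Mat.(x >.$ 2.) in             (* elt_greater_scalar: 0/1 matrix *)
  Mat.(x += mask);                         (* in-place add_                  *)
  Printf.printf "approx equal: %b\n" Mat.(x =~ x);
  Mat.print x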

\ No newline at end of file diff --git a/owl-base/Owl_operator/Make_Linalg/argument-1-M/index.html b/owl-base/Owl_operator/Make_Linalg/argument-1-M/index.html deleted file mode 100644 index 0c207cd71..000000000 --- a/owl-base/Owl_operator/Make_Linalg/argument-1-M/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -M (owl-base.Owl_operator.Make_Linalg.M)

Parameter Make_Linalg.M

type ('a, 'b) t
val mpow : ('a, 'b) t -> float -> ('a, 'b) t
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
\ No newline at end of file diff --git a/owl-base/Owl_operator/Make_Linalg/index.html b/owl-base/Owl_operator/Make_Linalg/index.html deleted file mode 100644 index 3fc1361cc..000000000 --- a/owl-base/Owl_operator/Make_Linalg/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Make_Linalg (owl-base.Owl_operator.Make_Linalg)

Module Owl_operator.Make_Linalg

Parameters

Signature

val (**@) : ('a, 'b) M.t -> float -> ('a, 'b) M.t

Operator of mpow, i.e. matrix power.

val (/@) : ('a, 'b) M.t -> ('a, 'b) M.t -> ('a, 'b) M.t

Operator of linsolve a b, i.e. for solving a linear system a * x = b.
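
A hedged usage sketch; it assumes Owl.Mat from the full owl package exposes these two operators on dense matrices:

let () =
  let open Owl in
  let a = Mat.(eye 3 + uniform 3 3) in
  let b = Mat.uniform 3 1 in
  let x = Mat.(a /@ b) in              (* linsolve: solves a * x = b *)
  Mat.print Mat.(a *@ x - b);          (* residual, expected ~ 0     *)
  Mat.print Mat.(a **@ 2.)             (* mpow: a * a                *)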

\ No newline at end of file diff --git a/owl-base/Owl_operator/Make_Matrix/argument-1-M/index.html b/owl-base/Owl_operator/Make_Matrix/argument-1-M/index.html deleted file mode 100644 index 85ddcfb43..000000000 --- a/owl-base/Owl_operator/Make_Matrix/argument-1-M/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -M (owl-base.Owl_operator.Make_Matrix.M)

Parameter Make_Matrix.M

type ('a, 'b) t
val get : ('a, 'b) t -> int -> int -> 'a
val set : ('a, 'b) t -> int -> int -> 'a -> unit
val dot : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
\ No newline at end of file diff --git a/owl-base/Owl_operator/Make_Matrix/index.html b/owl-base/Owl_operator/Make_Matrix/index.html deleted file mode 100644 index e9765bfa1..000000000 --- a/owl-base/Owl_operator/Make_Matrix/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Make_Matrix (owl-base.Owl_operator.Make_Matrix)

Module Owl_operator.Make_Matrix

Parameters

Signature

val (*@) : ('a, 'b) M.t -> ('a, 'b) M.t -> ('a, 'b) M.t

Operator of dot a b, i.e. matrix multiplication a * b.

val (.%{}) : ('a, 'b) M.t -> (int * int) -> 'a
val (.%{;..}) : ('a, 'b) M.t -> int array -> 'a

Operator of get

val (.%{}<-) : ('a, 'b) M.t -> (int * int) -> 'a -> unit
val (.%{;..}<-) : ('a, 'b) M.t -> int array -> 'a -> unit

Operator of set
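
A hedged usage sketch; the assumption is that Owl.Mat from the full owl package includes these operators with the (int * int) indexing shown above:

let () =
  let open Owl in
  let a = Mat.uniform 2 3 in
  let b = Mat.uniform 3 2 in
  let c = Mat.(a *@ b) in                            (* dot a b *)
  Printf.printf "c.(0,1) = %g\n" Mat.(c.%{0, 1});    (* get     *)
  Mat.(c.%{0, 1} <- 0.);                             (* set     *)
  Mat.print c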

\ No newline at end of file diff --git a/owl-base/Owl_operator/Make_Ndarray/argument-1-M/index.html b/owl-base/Owl_operator/Make_Ndarray/argument-1-M/index.html deleted file mode 100644 index 9c4c6df4d..000000000 --- a/owl-base/Owl_operator/Make_Ndarray/argument-1-M/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -M (owl-base.Owl_operator.Make_Ndarray.M)

Parameter Make_Ndarray.M

type ('a, 'b) t
val get : ('a, 'b) t -> int array -> 'a
val set : ('a, 'b) t -> int array -> 'a -> unit
\ No newline at end of file diff --git a/owl-base/Owl_operator/Make_Ndarray/index.html b/owl-base/Owl_operator/Make_Ndarray/index.html deleted file mode 100644 index 54ce7f64d..000000000 --- a/owl-base/Owl_operator/Make_Ndarray/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Make_Ndarray (owl-base.Owl_operator.Make_Ndarray)

Module Owl_operator.Make_Ndarray

Parameters

Signature

val (.%{}) : ('a, 'b) M.t -> int -> 'a
val (.%{;..}) : ('a, 'b) M.t -> int array -> 'a

Operator of get

val (.%{}<-) : ('a, 'b) M.t -> int -> 'a -> unit
val (.%{;..}<-) : ('a, 'b) M.t -> int array -> 'a -> unit

Operator of set

\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/.dummy b/owl-base/Owl_optimise_generic/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_optimise_generic/Make/Batch/index.html b/owl-base/Owl_optimise_generic/Make/Batch/index.html deleted file mode 100644 index 34a073074..000000000 --- a/owl-base/Owl_optimise_generic/Make/Batch/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Batch (owl-base.Owl_optimise_generic.Make.Batch)

Module Make.Batch

type typ =
  1. | Full
  2. | Mini of int
  3. | Sample of int
  4. | Stochastic
val run : typ -> Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t
val batches : typ -> Algodiff.t -> int
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/Checkpoint/index.html b/owl-base/Owl_optimise_generic/Make/Checkpoint/index.html deleted file mode 100644 index c4d74b390..000000000 --- a/owl-base/Owl_optimise_generic/Make/Checkpoint/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Checkpoint (owl-base.Owl_optimise_generic.Make.Checkpoint)

Module Make.Checkpoint

type state = {
  1. mutable current_batch : int;
  2. mutable batches_per_epoch : int;
  3. mutable epochs : float;
  4. mutable batches : int;
  5. mutable loss : Algodiff.t array;
  6. mutable start_at : float;
  7. mutable stop : bool;
  8. mutable gs : Algodiff.t array array;
  9. mutable ps : Algodiff.t array array;
  10. mutable us : Algodiff.t array array;
  11. mutable ch : Algodiff.t array array array;
}
type typ =
  1. | Batch of int
  2. | Epoch of float
  3. | Custom of state -> unit
  4. | None
val init_state : int -> float -> state
val default_checkpoint_fun : (string -> 'a) -> 'b
val print_state_info : state -> unit
val print_summary : state -> unit
val run : typ -> (string -> unit) -> int -> Algodiff.t -> state -> unit
val to_string : typ -> string
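
A hedged sketch of a Custom checkpoint, built only from the state fields documented above; it assumes Checkpoint is in scope through the instantiated Make module (for the full owl package, e.g. after open Neural.S.Graph):

let progress =
  Checkpoint.Custom
    (fun s ->
      Printf.printf "batch %i/%i, epoch %.2f\n%!"
        s.Checkpoint.current_batch s.Checkpoint.batches s.Checkpoint.epochs)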
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/Clipping/index.html b/owl-base/Owl_optimise_generic/Make/Clipping/index.html deleted file mode 100644 index 7dc5d33e1..000000000 --- a/owl-base/Owl_optimise_generic/Make/Clipping/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Clipping (owl-base.Owl_optimise_generic.Make.Clipping)

Module Make.Clipping

type typ =
  1. | L2norm of float
  2. | Value of float * float
  3. | None
val run : typ -> Algodiff.t -> Algodiff.t
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/Gradient/index.html b/owl-base/Owl_optimise_generic/Make/Gradient/index.html deleted file mode 100644 index a20c73d96..000000000 --- a/owl-base/Owl_optimise_generic/Make/Gradient/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -Gradient (owl-base.Owl_optimise_generic.Make.Gradient)

Module Make.Gradient

type typ =
  1. | GD
  2. | CG
  3. | CD
  4. | NonlinearCG
  5. | DaiYuanCG
  6. | NewtonCG
  7. | Newton
val run : typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/Learning_Rate/index.html b/owl-base/Owl_optimise_generic/Make/Learning_Rate/index.html deleted file mode 100644 index 9c21f0dc6..000000000 --- a/owl-base/Owl_optimise_generic/Make/Learning_Rate/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Learning_Rate (owl-base.Owl_optimise_generic.Make.Learning_Rate)

Module Make.Learning_Rate

type typ =
  1. | Adagrad of float
  2. | Const of float
  3. | Decay of float * float
  4. | Exp_decay of float * float
  5. | RMSprop of float * float
  6. | Adam of float * float * float
  7. | Schedule of float array
val run : typ -> int -> Algodiff.t -> Algodiff.t array -> Algodiff.t
val default : typ -> typ
val update_ch : typ -> Algodiff.t -> Algodiff.t array -> Algodiff.t array
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/Loss/index.html b/owl-base/Owl_optimise_generic/Make/Loss/index.html deleted file mode 100644 index cff1279cd..000000000 --- a/owl-base/Owl_optimise_generic/Make/Loss/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Loss (owl-base.Owl_optimise_generic.Make.Loss)

Module Make.Loss

type typ =
  1. | Hinge
  2. | L1norm
  3. | L2norm
  4. | Quadratic
  5. | Cross_entropy
  6. | Custom of Algodiff.t -> Algodiff.t -> Algodiff.t
val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/Momentum/index.html b/owl-base/Owl_optimise_generic/Make/Momentum/index.html deleted file mode 100644 index 7a7169fd0..000000000 --- a/owl-base/Owl_optimise_generic/Make/Momentum/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Momentum (owl-base.Owl_optimise_generic.Make.Momentum)

Module Make.Momentum

type typ =
  1. | Standard of float
  2. | Nesterov of float
  3. | None
val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/Params/index.html b/owl-base/Owl_optimise_generic/Make/Params/index.html deleted file mode 100644 index 0cfd71862..000000000 --- a/owl-base/Owl_optimise_generic/Make/Params/index.html +++ /dev/null @@ -1,14 +0,0 @@ - -Params (owl-base.Owl_optimise_generic.Make.Params)

Module Make.Params

type typ = {
  1. mutable epochs : float;
  2. mutable batch : Batch.typ;
  3. mutable gradient : Gradient.typ;
  4. mutable loss : Loss.typ;
  5. mutable learning_rate : Learning_Rate.typ;
  6. mutable regularisation : Regularisation.typ;
  7. mutable momentum : Momentum.typ;
  8. mutable clipping : Clipping.typ;
  9. mutable stopping : Stopping.typ;
  10. mutable checkpoint : Checkpoint.typ;
  11. mutable verbosity : bool;
}
val default : unit -> typ
val config : ?batch:Batch.typ -> ?gradient:Gradient.typ -> ?loss:Loss.typ -> ?learning_rate:Learning_Rate.typ -> ?regularisation:Regularisation.typ -> ?momentum:Momentum.typ -> ?clipping:Clipping.typ -> ?stopping:Stopping.typ -> ?checkpoint:Checkpoint.typ -> ?verbosity:bool -> float -> typ
val to_string : typ -> string
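
A hedged sketch of a typical training configuration, mirroring Owl's published examples; the module paths (Neural.S / Neural.S.Graph re-exporting Params, Batch, Loss, Learning_Rate, Checkpoint) are assumptions that hold for the full owl package:

open Owl
open Neural.S
open Neural.S.Graph

let params =
  Params.config
    ~batch:(Batch.Mini 100)
    ~loss:Loss.Cross_entropy
    ~learning_rate:(Learning_Rate.Adagrad 0.005)
    ~checkpoint:(Checkpoint.Epoch 1.)
    ~verbosity:true
    10.                                (* train for 10 epochs *)

(* Typically handed to training, e.g. Graph.train ~params network x y. *)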
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/Regularisation/index.html b/owl-base/Owl_optimise_generic/Make/Regularisation/index.html deleted file mode 100644 index 58e81bbc8..000000000 --- a/owl-base/Owl_optimise_generic/Make/Regularisation/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Regularisation (owl-base.Owl_optimise_generic.Make.Regularisation)

Module Make.Regularisation

type typ =
  1. | L1norm of float
  2. | L2norm of float
  3. | Elastic_net of float * float
  4. | None
val run : typ -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/Stopping/index.html b/owl-base/Owl_optimise_generic/Make/Stopping/index.html deleted file mode 100644 index 634e20ce3..000000000 --- a/owl-base/Owl_optimise_generic/Make/Stopping/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Stopping (owl-base.Owl_optimise_generic.Make.Stopping)

Module Make.Stopping

type typ =
  1. | Const of float
  2. | Early of int * int
  3. | None
val run : typ -> float -> bool
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/Utils/index.html b/owl-base/Owl_optimise_generic/Make/Utils/index.html deleted file mode 100644 index 8c37c3dd2..000000000 --- a/owl-base/Owl_optimise_generic/Make/Utils/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Utils (owl-base.Owl_optimise_generic.Make.Utils)

Module Make.Utils

val sample_num : Algodiff.t -> int
val draw_samples : Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t
val get_chunk : Algodiff.t -> Algodiff.t -> int -> int -> Algodiff.t * Algodiff.t
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/A/Linalg/index.html b/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/A/Linalg/index.html deleted file mode 100644 index f40a7aa3b..000000000 --- a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_optimise_generic.Make.Algodiff.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
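
For orientation, the same linear-algebra operations are available through the full owl package; a hedged sketch using Owl.Linalg.D as an assumed stand-in for this A.Linalg parameter:

let () =
  let open Owl in
  let a = Mat.(eye 3 + uniform 3 3) in
  let b = Mat.uniform 3 1 in
  let x = Linalg.D.linsolve a b in       (* solve a * x = b *)
  Mat.print Mat.(a *@ x - b);            (* residual ~ 0    *)
  Mat.print Mat.(Linalg.D.inv a *@ a)    (* ~ identity      *)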
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/A/Mat/index.html b/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/A/Mat/index.html deleted file mode 100644 index eb177c987..000000000 --- a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_optimise_generic.Make.Algodiff.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/A/Scalar/index.html b/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/A/Scalar/index.html deleted file mode 100644 index 886763be2..000000000 --- a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_optimise_generic.Make.Algodiff.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/A/index.html b/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/A/index.html deleted file mode 100644 index 406dca810..000000000 --- a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl-base.Owl_optimise_generic.Make.Algodiff.A)

Module Algodiff.A

include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Arr/index.html b/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Arr/index.html deleted file mode 100644 index 33d5d99e5..000000000 --- a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Arr/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Arr (owl-base.Owl_optimise_generic.Make.Algodiff.Arr)

Module Algodiff.Arr

val empty : int array -> t
val zeros : int array -> t
val ones : int array -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int array -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int array -> t
val shape : t -> int array
val numel : t -> int
val reset : t -> unit
val reshape : t -> int array -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Builder/index.html b/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Builder/index.html deleted file mode 100644 index 0b4e115da..000000000 --- a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Builder/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Builder (owl-base.Owl_optimise_generic.Make.Algodiff.Builder)

Module Algodiff.Builder

Ops Builder
module type Siso = sig ... end
val build_siso : (module Siso) -> t -> t

build single input single output operations

module type Sipo = sig ... end
val build_sipo : (module Sipo) -> t -> t * t

build single input pair outputs operations

module type Sito = sig ... end
val build_sito : (module Sito) -> t -> t * t * t

build single input triple outputs operations

module type Siao = sig ... end
val build_siao : (module Siao) -> t -> t array

build single input array output operations

module type Piso = sig ... end
val build_piso : (module Piso) -> t -> t -> t

build pair inputs single output operations

module type Aiso = sig ... end
val build_aiso : (module Aiso) -> t array -> t

build array input single output operations
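As an illustration of the builder interface, the sketch below registers a custom single-input single-output operation through build_siso. It is only a sketch: the alias AD = Owl.Algodiff.D, the df argument order (output primal, input primal, input tangent) and the dr argument order (input, output primal, output adjoint ref) are assumptions for illustration, not taken from this page.

(* Hypothetical sketch of a custom cube op, f x = x^3, built with build_siso. *)
module AD = Owl.Algodiff.D

let cube =
  AD.Builder.build_siso
    (module struct
      let label = "cube"
      let ff_f a = AD.F (AD.A.Scalar.(mul a (mul a a)))
      let ff_arr a = AD.Arr (AD.A.(mul a (mul a a)))
      (* forward mode: d(x^3) = 3 x^2 dx; assumed argument order (cp, ap, at) *)
      let df _cp ap at = AD.Maths.(at * AD._f 3. * ap * ap)
      (* reverse mode: adjoint contribution 3 x^2 * ybar; assumed order (a, cp, ca) *)
      let dr a _cp ca =
        let x = AD.primal a in
        AD.Maths.(!ca * AD._f 3. * x * x)
    end : AD.Builder.Siso)

let () = Printf.printf "cube 2 = %g\n" (AD.unpack_flt (cube (AD._f 2.)))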

\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Builder/module-type-Aiso/index.html b/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Builder/module-type-Aiso/index.html deleted file mode 100644 index 784469c87..000000000 --- a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Builder/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl-base.Owl_optimise_generic.Make.Algodiff.Builder.Aiso)

Module type Builder.Aiso

val label : string
val ff : t array -> t
val df : int list -> t -> t array -> t array -> t
val dr : int list -> t array -> t -> t Stdlib.ref -> t list
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Builder/module-type-Piso/index.html b/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Builder/module-type-Piso/index.html deleted file mode 100644 index cf8e2dfa9..000000000 --- a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Builder/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl-base.Owl_optimise_generic.Make.Algodiff.Builder.Piso)

Module type Builder.Piso

val label : string
val ff_aa : A.elt -> A.elt -> t
val ff_ab : A.elt -> A.arr -> t
val ff_ba : A.arr -> A.elt -> t
val ff_bb : A.arr -> A.arr -> t
val df_da : t -> t -> t -> t -> t
val df_db : t -> t -> t -> t -> t
val df_dab : t -> t -> t -> t -> t -> t
val dr_ab : t -> t -> t -> t Stdlib.ref -> t * t
val dr_a : t -> t -> t -> t Stdlib.ref -> t
val dr_b : t -> t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Builder/module-type-Siao/index.html b/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Builder/module-type-Siao/index.html deleted file mode 100644 index fda2c757d..000000000 --- a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Builder/module-type-Siao/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siao (owl-base.Owl_optimise_generic.Make.Algodiff.Builder.Siao)

Module type Builder.Siao

val label : string
val ff_f : A.elt -> t array
val ff_arr : A.arr -> t array
val df : t array -> t -> t -> t array
val dr : t -> t -> t Stdlib.ref array -> t Stdlib.ref array -> t
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Builder/module-type-Sipo/index.html b/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Builder/module-type-Sipo/index.html deleted file mode 100644 index 5c4822e17..000000000 --- a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Builder/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl-base.Owl_optimise_generic.Make.Algodiff.Builder.Sipo)

Module type Builder.Sipo

val label : string
val ff_f : A.elt -> t * t
val ff_arr : A.arr -> t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Builder/module-type-Siso/index.html b/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Builder/module-type-Siso/index.html deleted file mode 100644 index dbda5bdad..000000000 --- a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Builder/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl-base.Owl_optimise_generic.Make.Algodiff.Builder.Siso)

Module type Builder.Siso

val label : string
val ff_f : A.elt -> t
val ff_arr : A.arr -> t
val df : t -> t -> t -> t
val dr : t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Builder/module-type-Sito/index.html b/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Builder/module-type-Sito/index.html deleted file mode 100644 index f4351d19d..000000000 --- a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Builder/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl-base.Owl_optimise_generic.Make.Algodiff.Builder.Sito)

Module type Builder.Sito

val label : string
val ff_f : A.elt -> t * t * t
val ff_arr : A.arr -> t * t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Linalg/index.html b/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Linalg/index.html deleted file mode 100644 index 2f0fa9012..000000000 --- a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_optimise_generic.Make.Algodiff.Linalg)

Module Algodiff.Linalg

val inv : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val logdet : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val chol : ?upper:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val qr : t -> t * t

Refer to :doc:`owl_dense_ndarray_generic`

val lq : t -> t * t

Refer to :doc:`owl_dense_ndarray_generic`

val svd : ?thin:bool -> t -> t * t * t

Refer to :doc:`owl_dense_ndarray_generic`

val sylvester : t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val lyapunov : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (/@) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val care : ?diag_r:bool -> t -> t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dare : ?diag_r:bool -> t -> t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Mat/index.html b/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Mat/index.html deleted file mode 100644 index 08747a1a0..000000000 --- a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_optimise_generic.Make.Algodiff.Mat)

Module Algodiff.Mat

val empty : int -> int -> t
val zeros : int -> int -> t
val eye : int -> t
val ones : int -> int -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int -> int -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int -> int -> t
val shape : t -> int * int
val numel : t -> int
val row_num : t -> int
val col_num : t -> int
val reset : t -> unit
val reshape : int -> int -> t -> t
val get : t -> int -> int -> t
val set : t -> int -> int -> t -> t
val row : t -> int -> t
val mean : t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
val map_by_row : (t -> t) -> t -> t
val of_arrays : A.elt array array -> t
val init_2d : int -> int -> (int -> int -> t) -> t
val print : t -> unit
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Maths/index.html b/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Maths/index.html deleted file mode 100644 index 8b068fcbf..000000000 --- a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/Maths/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Maths (owl-base.Owl_optimise_generic.Make.Algodiff.Maths)

Module Algodiff.Maths

val (+) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (-) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (*) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (/) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (*@) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (**) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val add : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sub : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val mul : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val div : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val kron : t -> t -> t

Refer to :doc:`owl_dense_matrix_generic`

val dot : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val pow : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atan2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val min2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cross_entropy : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val inv : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val neg : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val abs : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val signum : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val floor : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val ceil : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val round : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sqr : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sqrt : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log2 : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log10 : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val exp : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sin : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cos : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tan : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sinh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cosh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tanh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val asin : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val acos : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atan : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val asinh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val acosh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atanh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp : ?axis:int -> ?keep_dims:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum : ?axis:int -> ?keep_dims:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum_reduce : ?axis:int array -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val mean : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose : ?axis:int array -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val swap : int -> int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l1norm' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm_sqr' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sigmoid : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val relu : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dawsn : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softplus : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softsign : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softmax : ?axis:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val reshape : t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val flatten : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_item : t -> int -> int -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_row : t -> int -> t

Refer to :doc:`owl_dense_ndarray_generic`

val concat : axis:int -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val split : axis:int -> int array -> t -> t array

Refer to :doc:`owl_dense_ndarray_generic`

val of_arrays : t array array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val to_arrays : t -> t array array

Refer to :doc:`owl_dense_ndarray_generic`

val concatenate : axis:int -> t array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val stack : axis:int -> t array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_slice : int list list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val set_slice : int list list -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_fancy : Owl_types.index list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val set_fancy : Owl_types.index list -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val diag : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val diagm : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val trace : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val triu : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tril : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/NN/index.html b/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/NN/index.html deleted file mode 100644 index 73613537e..000000000 --- a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/NN/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -NN (owl-base.Owl_optimise_generic.Make.Algodiff.NN)

Module Algodiff.NN

val dropout : ?rate:float -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool1d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool2d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool3d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool1d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool2d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool3d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val upsampling2d : t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val pad : ?v:A.elt -> int list list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/index.html b/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/index.html deleted file mode 100644 index b6d8d152b..000000000 --- a/owl-base/Owl_optimise_generic/Make/argument-1-Algodiff/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -Algodiff (owl-base.Owl_optimise_generic.Make.Algodiff)

Parameter Make.Algodiff

include Owl_algodiff_core_sig.Sig
Type definition
include Owl_algodiff_types_sig.Sig with type elt := A.elt and type arr := A.arr
type t =
  | F of A.elt
  | Arr of A.arr
  | DF of t * t * int
  | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
Core functions
val tag : unit -> int

TODO

val primal : t -> t

TODO

val primal' : t -> t

TODO

val zero : t -> t

TODO

val reset_zero : t -> t

TODO

val tangent : t -> t

TODO

val adjref : t -> t Stdlib.ref

TODO

val adjval : t -> t

TODO

val shape : t -> int array

TODO

val is_float : t -> bool

TODO

val is_arr : t -> bool

TODO

val row_num : t -> int

number of rows

val col_num : t -> int

number of columns

val numel : t -> int

number of elements

val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t

other functions, without tracking gradient

val clip_by_l2norm : A.elt -> t -> t

other functions, without tracking gradient

val copy_primal' : t -> t

TODO

val tile : t -> int array -> t

TODO

val repeat : t -> int array -> t

TODO

val pack_elt : A.elt -> t

convert from elt type to t type.

val unpack_elt : t -> A.elt

convert from t type to elt type.

val pack_flt : float -> t

convert from float type to t type.

val _f : float -> t

A shortcut function for F A.(float_to_elt x).

val unpack_flt : t -> float

convert from t type to float type.

val pack_arr : A.arr -> t

convert from arr type to t type.

val unpack_arr : t -> A.arr

convert from t type to arr type.
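As a minimal usage sketch of these conversions (assuming the double-precision instantiation Owl.Algodiff.D from the full owl package, aliased AD below):

module AD = Owl.Algodiff.D

let () =
  (* wrap a plain float and a plain ndarray as Algodiff values *)
  let x = AD.pack_flt 2.5 in
  let y = AD.pack_arr (Owl.Arr.ones [| 2; 2 |]) in
  (* unwrap them again after evaluation or differentiation *)
  Printf.printf "x = %g, numel y = %d\n" (AD.unpack_flt x) (AD.numel y)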

val deep_info : t -> string

TODO

val type_info : t -> string

TODO

val error_binop : string -> t -> t -> 'a

TODO

val error_uniop : string -> t -> 'a

TODO

val make_forward : t -> t -> int -> t

TODO

val make_reverse : t -> int -> t

TODO

val reverse_prop : t -> t -> unit

TODO

val diff : (t -> t) -> t -> t

diff f x returns the exact derivative of a function f : scalar -> scalar at point x. Simply calling diff f will return its derivative function g of the same type, i.e. g : scalar -> scalar.

Calling this function repeatedly gives higher-order derivatives of f, i.e. f |> diff |> diff |> diff |> ...
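For illustration, a minimal sketch of first- and second-order derivatives obtained by chaining diff (the alias AD = Owl.Algodiff.D is an assumption, not part of this signature):

module AD = Owl.Algodiff.D

let () =
  let f x = AD.Maths.(sin x * x) in
  (* first derivative at x = 1.0 *)
  let d1 = AD.diff f (AD.F 1.) |> AD.unpack_flt in
  (* second derivative by applying diff twice *)
  let d2 = (f |> AD.diff |> AD.diff) (AD.F 1.) |> AD.unpack_flt in
  Printf.printf "f'(1) = %g, f''(1) = %g\n" d1 d2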

val diff' : (t -> t) -> t -> t * t

similar to diff, but return (f x, diff f x).

val grad : (t -> t) -> t -> t

gradient of f : (vector -> scalar) at x, reverse ad.

val grad' : (t -> t) -> t -> t * t

similar to grad, but return (f x, grad f x).
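A sketch of grad' on a vector -> scalar function (AD = Owl.Algodiff.D is assumed):

module AD = Owl.Algodiff.D

let () =
  (* f x = ||x||^2, so the gradient is 2x *)
  let f x = AD.Maths.l2norm_sqr' x in
  let x = AD.pack_arr (Owl.Arr.sequential [| 3 |]) in
  let y, g = AD.grad' f x in
  Printf.printf "f x = %g\n" (AD.unpack_flt y);
  Owl.Arr.print (AD.unpack_arr g)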

val jacobian : (t -> t) -> t -> t

jacobian of f : (vector -> vector) at x, both x and y are row vectors.

val jacobian' : (t -> t) -> t -> t * t

similar to jacobian, but return (f x, jacobian f x)

val jacobianv : (t -> t) -> t -> t -> t

jacobian vector product of f : (vector -> vector) at x along v, forward ad. Namely, it calculates (jacobian f x) v.

val jacobianv' : (t -> t) -> t -> t -> t * t

similar to jacobianv, but return (f x, jacobianv f x v)

val jacobianTv : (t -> t) -> t -> t -> t

transposed jacobian vector product of f : (vector -> vector) at x along v, backward ad. Namely, it calculates transpose (jacobianv f x v).

val jacobianTv' : (t -> t) -> t -> t -> t * t

similar to jacobianTv, but return (f x, transpose (jacobianv f x v))
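A sketch contrasting the forward-mode product jacobianv with the reverse-mode product jacobianTv on an elementwise vector -> vector function (AD = Owl.Algodiff.D and the row-vector shapes are assumptions):

module AD = Owl.Algodiff.D

let () =
  let f x = AD.Maths.(sqr x + sin x) in
  let x = AD.pack_arr (Owl.Arr.uniform [| 1; 4 |]) in
  let v = AD.pack_arr (Owl.Arr.ones [| 1; 4 |]) in
  let jv = AD.jacobianv f x v in    (* forward ad: (jacobian f x) v *)
  let jtv = AD.jacobianTv f x v in  (* backward ad: transposed product *)
  Owl.Arr.print (AD.unpack_arr jv);
  Owl.Arr.print (AD.unpack_arr jtv)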

val hessian : (t -> t) -> t -> t

hessian of f : (scalar -> scalar) at x.

val hessian' : (t -> t) -> t -> t * t

similar to hessian, but return (f x, hessian f x)

val hessianv : (t -> t) -> t -> t -> t

hessian vector product of f : (scalar -> scalar) at x along v. Namely, it calculates (hessian f x) v.

val hessianv' : (t -> t) -> t -> t -> t * t

similar to hessianv, but return (f x, hessianv f x v).
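A sketch of hessian and hessianv; in practice the differentiated function maps a (row) vector to a scalar output, and AD = Owl.Algodiff.D is again an assumption:

module AD = Owl.Algodiff.D

let () =
  let f x = AD.Maths.(sum' (x * x * x)) in
  let x = AD.pack_arr (Owl.Arr.uniform [| 1; 3 |]) in
  let h = AD.hessian f x in                  (* full Hessian matrix *)
  let v = AD.pack_arr (Owl.Arr.ones [| 1; 3 |]) in
  let hv = AD.hessianv f x v in              (* Hessian-vector product *)
  Owl.Arr.print (AD.unpack_arr h);
  Owl.Arr.print (AD.unpack_arr hv)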

val laplacian : (t -> t) -> t -> t

laplacian of f : (scalar -> scalar) at x.

val laplacian' : (t -> t) -> t -> t * t

similar to laplacian, but return (f x, laplacian f x).

val gradhessian : (t -> t) -> t -> t * t

return (grad f x, hessian f x), f : (scalar -> scalar)

val gradhessian' : (t -> t) -> t -> t * t * t

return (f x, grad f x, hessian f x)

val gradhessianv : (t -> t) -> t -> t -> t * t

return (grad f x v, hessian f x v)

val gradhessianv' : (t -> t) -> t -> t -> t * t * t

return (f x, grad f x v, hessian f x v)

include Owl_algodiff_ops_sig.Sig with type t := t and type elt := A.elt and type arr := A.arr and type op := op
module Builder : Owl_algodiff_ops_builder_sig.Sig with type t := t and type elt := A.elt and type arr := A.arr and type op := op
Supported Maths functions
module Maths : sig ... end
Supported Linalg functions
module Linalg : sig ... end
Supported Neural Network functions
module NN : sig ... end
Supported Mat functions
module Mat : sig ... end
Supported Arr functions
module Arr : sig ... end
Helper functions
include Owl_algodiff_graph_convert_sig.Sig with type t := t
val to_trace : t list -> string

to_trace [t0; t1; ...] outputs the trace of the computation graph to the terminal in a human-readable format.

val to_dot : t list -> string

to_dot [t0; t1; ...] outputs the trace of the computation graph in the dot file format, which you can pass to other tools for further visualisation, such as Graphviz.
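For example (a sketch: AD = Owl.Algodiff.D and the ./graph.dot output path are assumptions), the dot output can be written to a file and rendered offline with Graphviz:

module AD = Owl.Algodiff.D

let () =
  let x = AD.make_reverse (AD.pack_arr (Owl.Arr.uniform [| 1; 3 |])) (AD.tag ()) in
  let y = AD.Maths.(sum' (sin x * x)) in
  (* dump the computation graph reachable from y in dot format *)
  let oc = open_out "graph.dot" in
  output_string oc (AD.to_dot [ y ]);
  close_out oc
  (* render with: dot -Tpdf graph.dot -o graph.pdf *)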

val pp_num : Stdlib.Format.formatter -> t -> unit

pp_num t pretty prints the abstract number used in Algodiff.

\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic/Make/index.html b/owl-base/Owl_optimise_generic/Make/index.html deleted file mode 100644 index 247350b14..000000000 --- a/owl-base/Owl_optimise_generic/Make/index.html +++ /dev/null @@ -1,31 +0,0 @@ - -Make (owl-base.Owl_optimise_generic.Make)

Module Owl_optimise_generic.Make

Parameters

Signature

module Algodiff = Algodiff
module Utils : sig ... end
module Learning_Rate : sig ... end
module Batch : sig ... end
module Loss : sig ... end
module Gradient : sig ... end
module Momentum : sig ... end
module Regularisation : sig ... end
module Clipping : sig ... end
module Stopping : sig ... end
module Checkpoint : sig ... end
module Params : sig ... end
val minimise_weight : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Checkpoint.state * Algodiff.t
val minimise_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t * Algodiff.t array array) -> (Algodiff.t -> Algodiff.t array array * Algodiff.t array array) -> (Algodiff.t array array -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state
val minimise_fun : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Checkpoint.state * Algodiff.t
val minimise_compiled_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> (unit -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state
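As a usage sketch only: in the full owl package this functor is typically accessed through a pre-built instantiation such as Owl.Optimise.D, and minimise_fun then minimises a plain Algodiff.t -> Algodiff.t objective. Owl.Optimise.D and Params.default below are assumptions, not defined on this page.

(* Hypothetical sketch: minimise (x - 3)^2 starting from x = 0. *)
module O = Owl.Optimise.D   (* assumed double-precision optimiser *)
module AD = O.Algodiff

let () =
  let f x = AD.Maths.(sqr (x - AD._f 3.)) in
  let params = O.Params.default () in      (* assumed default hyper-parameters *)
  let _state, x_min = O.minimise_fun params f (AD._f 0.) in
  Printf.printf "argmin ~ %g\n" (AD.unpack_flt x_min)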
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/.dummy b/owl-base/Owl_optimise_generic_sig/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/A/Linalg/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/A/Linalg/index.html deleted file mode 100644 index bf4fac49c..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_optimise_generic_sig.Sig.Algodiff.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/A/Mat/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/A/Mat/index.html deleted file mode 100644 index 1f2d6e889..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_optimise_generic_sig.Sig.Algodiff.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/A/Scalar/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/A/Scalar/index.html deleted file mode 100644 index 1c591dc7c..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_optimise_generic_sig.Sig.Algodiff.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/A/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/A/index.html deleted file mode 100644 index 9665bec63..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl-base.Owl_optimise_generic_sig.Sig.Algodiff.A)

Module Algodiff.A

include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Arr/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Arr/index.html deleted file mode 100644 index 1f4215a0c..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Arr/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Arr (owl-base.Owl_optimise_generic_sig.Sig.Algodiff.Arr)

Module Algodiff.Arr

val empty : int array -> t
val zeros : int array -> t
val ones : int array -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int array -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int array -> t
val shape : t -> int array
val numel : t -> int
val reset : t -> unit
val reshape : t -> int array -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Builder/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Builder/index.html deleted file mode 100644 index 7e0d82bbe..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Builder/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Builder (owl-base.Owl_optimise_generic_sig.Sig.Algodiff.Builder)

Module Algodiff.Builder

Ops Builder
module type Siso = sig ... end
val build_siso : (module Siso) -> t -> t

build single input single output operations

module type Sipo = sig ... end
val build_sipo : (module Sipo) -> t -> t * t

build single input pair outputs operations

module type Sito = sig ... end
val build_sito : (module Sito) -> t -> t * t * t

build single input triple outputs operations

module type Siao = sig ... end
val build_siao : (module Siao) -> t -> t array

build single input array output operations

module type Piso = sig ... end
val build_piso : (module Piso) -> t -> t -> t

build pair inputs single output operations

module type Aiso = sig ... end
val build_aiso : (module Aiso) -> t array -> t

build array input single output operations

\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Builder/module-type-Aiso/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Builder/module-type-Aiso/index.html deleted file mode 100644 index 29cda5594..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Builder/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl-base.Owl_optimise_generic_sig.Sig.Algodiff.Builder.Aiso)

Module type Builder.Aiso

val label : string
val ff : t array -> t
val df : int list -> t -> t array -> t array -> t
val dr : int list -> t array -> t -> t Stdlib.ref -> t list
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Builder/module-type-Piso/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Builder/module-type-Piso/index.html deleted file mode 100644 index f84af55f1..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Builder/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl-base.Owl_optimise_generic_sig.Sig.Algodiff.Builder.Piso)

Module type Builder.Piso

val label : string
val ff_aa : A.elt -> A.elt -> t
val ff_ab : A.elt -> A.arr -> t
val ff_ba : A.arr -> A.elt -> t
val ff_bb : A.arr -> A.arr -> t
val df_da : t -> t -> t -> t -> t
val df_db : t -> t -> t -> t -> t
val df_dab : t -> t -> t -> t -> t -> t
val dr_ab : t -> t -> t -> t Stdlib.ref -> t * t
val dr_a : t -> t -> t -> t Stdlib.ref -> t
val dr_b : t -> t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Builder/module-type-Siao/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Builder/module-type-Siao/index.html deleted file mode 100644 index 7c65a4418..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Builder/module-type-Siao/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siao (owl-base.Owl_optimise_generic_sig.Sig.Algodiff.Builder.Siao)

Module type Builder.Siao

val label : string
val ff_f : A.elt -> t array
val ff_arr : A.arr -> t array
val df : t array -> t -> t -> t array
val dr : t -> t -> t Stdlib.ref array -> t Stdlib.ref array -> t
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Builder/module-type-Sipo/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Builder/module-type-Sipo/index.html deleted file mode 100644 index 5cfdb7e9f..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Builder/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl-base.Owl_optimise_generic_sig.Sig.Algodiff.Builder.Sipo)

Module type Builder.Sipo

val label : string
val ff_f : A.elt -> t * t
val ff_arr : A.arr -> t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Builder/module-type-Siso/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Builder/module-type-Siso/index.html deleted file mode 100644 index 8d974b323..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Builder/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl-base.Owl_optimise_generic_sig.Sig.Algodiff.Builder.Siso)

Module type Builder.Siso

val label : string
val ff_f : A.elt -> t
val ff_arr : A.arr -> t
val df : t -> t -> t -> t
val dr : t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Builder/module-type-Sito/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Builder/module-type-Sito/index.html deleted file mode 100644 index 8960fdcd9..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Builder/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl-base.Owl_optimise_generic_sig.Sig.Algodiff.Builder.Sito)

Module type Builder.Sito

val label : string
val ff_f : A.elt -> t * t * t
val ff_arr : A.arr -> t * t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Linalg/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Linalg/index.html deleted file mode 100644 index ca1700d2c..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_optimise_generic_sig.Sig.Algodiff.Linalg)

Module Algodiff.Linalg

val inv : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val logdet : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val chol : ?upper:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val qr : t -> t * t

Refer to :doc:`owl_dense_ndarray_generic`

val lq : t -> t * t

Refer to :doc:`owl_dense_ndarray_generic`

val svd : ?thin:bool -> t -> t * t * t

Refer to :doc:`owl_dense_ndarray_generic`

val sylvester : t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val lyapunov : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (/@) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val care : ?diag_r:bool -> t -> t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dare : ?diag_r:bool -> t -> t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Mat/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Mat/index.html deleted file mode 100644 index 1432c291c..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_optimise_generic_sig.Sig.Algodiff.Mat)

Module Algodiff.Mat

val empty : int -> int -> t
val zeros : int -> int -> t
val eye : int -> t
val ones : int -> int -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int -> int -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int -> int -> t
val shape : t -> int * int
val numel : t -> int
val row_num : t -> int
val col_num : t -> int
val reset : t -> unit
val reshape : int -> int -> t -> t
val get : t -> int -> int -> t
val set : t -> int -> int -> t -> t
val row : t -> int -> t
val mean : t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
val map_by_row : (t -> t) -> t -> t
val of_arrays : A.elt array array -> t
val init_2d : int -> int -> (int -> int -> t) -> t
val print : t -> unit
\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Maths/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Maths/index.html deleted file mode 100644 index 3cd9deb56..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/Maths/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Maths (owl-base.Owl_optimise_generic_sig.Sig.Algodiff.Maths)

Module Algodiff.Maths

val (+) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (-) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (*) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (/) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (*@) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (**) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val add : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sub : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val mul : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val div : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val kron : t -> t -> t

Refer to :doc:`owl_dense_matrix_generic`

val dot : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val pow : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atan2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val min2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cross_entropy : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val inv : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val neg : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val abs : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val signum : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val floor : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val ceil : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val round : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sqr : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sqrt : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log2 : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log10 : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val exp : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sin : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cos : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tan : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sinh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cosh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tanh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val asin : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val acos : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atan : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val asinh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val acosh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atanh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp : ?axis:int -> ?keep_dims:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum : ?axis:int -> ?keep_dims:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum_reduce : ?axis:int array -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val mean : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose : ?axis:int array -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val swap : int -> int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l1norm' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm_sqr' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sigmoid : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val relu : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dawsn : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softplus : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softsign : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softmax : ?axis:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val reshape : t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val flatten : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_item : t -> int -> int -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_row : t -> int -> t

Refer to :doc:`owl_dense_ndarray_generic`

val concat : axis:int -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val split : axis:int -> int array -> t -> t array

Refer to :doc:`owl_dense_ndarray_generic`

val of_arrays : t array array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val to_arrays : t -> t array array

Refer to :doc:`owl_dense_ndarray_generic`

val concatenate : axis:int -> t array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val stack : axis:int -> t array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_slice : int list list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val set_slice : int list list -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_fancy : Owl_types.index list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val set_fancy : Owl_types.index list -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val diag : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val diagm : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val trace : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val triu : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tril : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/NN/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/NN/index.html deleted file mode 100644 index 7b18f2f63..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/NN/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -NN (owl-base.Owl_optimise_generic_sig.Sig.Algodiff.NN)

Module Algodiff.NN

val dropout : ?rate:float -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool1d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool2d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool3d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool1d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool2d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool3d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val upsampling2d : t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val pad : ?v:A.elt -> int list list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/index.html deleted file mode 100644 index 1b47d94b3..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Algodiff/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -Algodiff (owl-base.Owl_optimise_generic_sig.Sig.Algodiff)

Module Sig.Algodiff

include Owl_algodiff_core_sig.Sig
Type definition
include Owl_algodiff_types_sig.Sig with type elt := A.elt and type arr := A.arr
type t =
  1. | F of A.elt
  2. | Arr of A.arr
  3. | DF of t * t * int
  4. | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
Core functions
val tag : unit -> int

TODO

val primal : t -> t

TODO

val primal' : t -> t

TODO

val zero : t -> t

TODO

val reset_zero : t -> t

TODO

val tangent : t -> t

TODO

val adjref : t -> t Stdlib.ref

TODO

val adjval : t -> t

TODO

val shape : t -> int array

TODO

val is_float : t -> bool

TODO

val is_arr : t -> bool

TODO

val row_num : t -> int

number of rows

val col_num : t -> int

number of columns

val numel : t -> int

number of elements

val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t

other functions, without tracking gradient

val clip_by_l2norm : A.elt -> t -> t

other functions, without tracking gradient

val copy_primal' : t -> t

TODO

val tile : t -> int array -> t

TODO

val repeat : t -> int array -> t

TODO

val pack_elt : A.elt -> t

convert from elt type to t type.

val unpack_elt : t -> A.elt

convert from t type to elt type.

val pack_flt : float -> t

convert from float type to t type.

val _f : float -> t

A shortcut function for F A.(float_to_elt x).

val unpack_flt : t -> float

convert from t type to float type.

val pack_arr : A.arr -> t

convert from arr type to t type.

val unpack_arr : t -> A.arr

convert from t type to arr type.
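
The packing and unpacking functions above are the usual way to move values between the underlying A.elt / A.arr types and the abstract t type. A minimal round-trip sketch, assuming the double-precision instance Owl.Algodiff.D from the full owl package (the module path is an assumption, not part of this abstract signature):

  module AD = Owl.Algodiff.D

  let () =
    (* scalar round trip: float -> t -> float *)
    let s = AD.pack_flt 3.14 in
    assert (AD.unpack_flt s = 3.14);
    (* ndarray round trip: arr -> t -> arr *)
    let a = Owl.Mat.sequential 2 3 in
    let t = AD.pack_arr a in
    assert (Owl.Mat.equal a (AD.unpack_arr t))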

val deep_info : t -> string

TODO

val type_info : t -> string

TODO

val error_binop : string -> t -> t -> 'a

TODO

val error_uniop : string -> t -> 'a

TODO

val make_forward : t -> t -> int -> t

TODO

val make_reverse : t -> int -> t

TODO

val reverse_prop : t -> t -> unit

TODO
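
Although the entries above are still marked TODO, make_forward, make_reverse and reverse_prop form the low-level reverse-mode interface: tag an input with make_reverse, build the output expression, seed the output via reverse_prop, then read the adjoint of the input with adjref / adjval. A sketch of this workflow, assuming the concrete instance Owl.Algodiff.D from the full owl package:

  module AD = Owl.Algodiff.D

  let () =
    (* d(x * sin x)/dx at x = 2, computed with the low-level API *)
    let x = AD.make_reverse (AD.pack_flt 2.) (AD.tag ()) in
    let y = AD.Maths.(x * sin x) in
    AD.reverse_prop (AD.pack_flt 1.) y;          (* seed dy/dy = 1 *)
    Printf.printf "dy/dx = %g\n" (AD.unpack_flt (AD.adjval x))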

val diff : (t -> t) -> t -> t

diff f x returns the exact derivative of a function f : scalar -> scalar at point x. Simply calling diff f will return its derivative function g of the same type, i.e. g : scalar -> scalar.

Calling this function repeatedly gives you higher-order derivatives of f, i.e. f |> diff |> diff |> diff |> ...
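
A short sketch of first- and second-order derivatives, assuming the concrete instance Owl.Algodiff.D from the full owl package (an assumption; this signature is abstract over A):

  module AD = Owl.Algodiff.D

  (* f : scalar -> scalar *)
  let f x = AD.Maths.(sin x + sqr x)

  let () =
    let f'  = AD.diff f in            (* first derivative  *)
    let f'' = AD.diff (AD.diff f) in  (* second derivative *)
    let x = AD.pack_flt 1.5 in
    Printf.printf "f'(1.5) = %g, f''(1.5) = %g\n"
      (AD.unpack_flt (f' x)) (AD.unpack_flt (f'' x))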

val diff' : (t -> t) -> t -> t * t

similar to diff, but return (f x, diff f x).

val grad : (t -> t) -> t -> t

gradient of f : (vector -> scalar) at x, reverse ad.

val grad' : (t -> t) -> t -> t * t

similar to grad, but return (f x, grad f x).
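
A small sketch of grad on a vector -> scalar function, again assuming Owl.Algodiff.D as the concrete instance:

  module AD = Owl.Algodiff.D

  (* f : vector -> scalar, here the squared L2 norm *)
  let f x = AD.Maths.(l2norm_sqr' x)

  let () =
    let x = AD.pack_arr (Owl.Mat.uniform 1 3) in   (* a row vector *)
    let g = AD.grad f x in                         (* gradient, same shape as x *)
    Owl.Mat.print (AD.unpack_arr g)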

val jacobian : (t -> t) -> t -> t

jacobian of f : (vector -> vector) at x; both x and the output y = f x are row vectors.

val jacobian' : (t -> t) -> t -> t * t

similar to jacobian, but return (f x, jacobian f x)

val jacobianv : (t -> t) -> t -> t -> t

jacobian vector product of f : (vector -> vector) at x along v, forward ad. Namely, it calculates (jacobian f x) v

val jacobianv' : (t -> t) -> t -> t -> t * t

similar to jacobianv, but return (f x, jacobianv f x v)

val jacobianTv : (t -> t) -> t -> t -> t

transposed jacobian vector product of f : (vector -> vector) at x along v, backward ad. Namely, it calculates (transpose (jacobian f x)) v.

val jacobianTv' : (t -> t) -> t -> t -> t * t

similar to jacobianTv, but return (f x, jacobianTv f x v)
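
The jacobian-vector products avoid materialising the full jacobian: jacobianv propagates v forward (J v), while jacobianTv pulls v back (J^T v). A sketch assuming Owl.Algodiff.D:

  module AD = Owl.Algodiff.D

  (* f : vector -> vector, an element-wise map kept deliberately simple *)
  let f x = AD.Maths.(sin x + sqr x)

  let () =
    let x = AD.pack_arr (Owl.Mat.uniform 1 4) in
    let v = AD.pack_arr (Owl.Mat.ones 1 4) in
    let jv  = AD.jacobianv  f x v in   (* forward mode: J v   *)
    let jtv = AD.jacobianTv f x v in   (* reverse mode: J^T v *)
    Owl.Mat.print (AD.unpack_arr jv);
    Owl.Mat.print (AD.unpack_arr jtv)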

val hessian : (t -> t) -> t -> t

hessian of f : (vector -> scalar) at x.

val hessian' : (t -> t) -> t -> t * t

similar to hessian, but return (f x, hessian f x)

val hessianv : (t -> t) -> t -> t -> t

hessian vector product of f : (vector -> scalar) at x along v. Namely, it calculates (hessian f x) v.

val hessianv' : (t -> t) -> t -> t -> t * t

similar to hessianv, but return (f x, hessianv f x v).

val laplacian : (t -> t) -> t -> t

laplacian of f : (vector -> scalar) at x.

val laplacian' : (t -> t) -> t -> t * t

similar to laplacian, but return (f x, laplacian f x).

val gradhessian : (t -> t) -> t -> t * t

return (grad f x, hessian f x), f : (vector -> scalar)

val gradhessian' : (t -> t) -> t -> t * t * t

return (f x, grad f x, hessian f x)

val gradhessianv : (t -> t) -> t -> t -> t * t

return (grad f x v, hessian f x v)

val gradhessianv' : (t -> t) -> t -> t -> t * t * t

return (f x, grad f x v, hessian f x v)

include Owl_algodiff_ops_sig.Sig - with type t := t - and type elt := A.elt - and type arr := A.arr - and type op := op
module Builder : - Owl_algodiff_ops_builder_sig.Sig - with type t := t - and type elt := A.elt - and type arr := A.arr - and type op := op
Supported Maths functions
module Maths : sig ... end
Supported Linalg functions
module Linalg : sig ... end
Supported Neural Network functions
module NN : sig ... end
Supported Mat functions
module Mat : sig ... end
Supported Arr functions
module Arr : sig ... end
Helper functions
include Owl_algodiff_graph_convert_sig.Sig with type t := t
val to_trace : t list -> string

to_trace [t0; t1; ...] returns the trace of the computation graph in a human-readable format, suitable for printing on the terminal.

val to_dot : t list -> string

to_dot [t0; t1; ...] returns the trace of the computation graph in the dot file format, which you can feed into other tools for further visualisation, such as Graphviz.
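
For instance, the dot output can be written to a file and rendered with Graphviz. A sketch assuming Owl.Algodiff.D as the concrete instance:

  module AD = Owl.Algodiff.D

  let () =
    let x = AD.make_reverse (AD.pack_arr (Owl.Mat.uniform 1 3)) (AD.tag ()) in
    let y = AD.Maths.(sin x |> sum') in
    let oc = open_out "graph.dot" in
    output_string oc (AD.to_dot [ y ]);
    close_out oc
    (* then render with, e.g.: dot -Tpdf graph.dot -o graph.pdf *)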

val pp_num : Stdlib.Format.formatter -> t -> unit

pp_num t pretty prints the abstract number used in Algodiff.

\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Batch/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Batch/index.html deleted file mode 100644 index 744e96490..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Batch/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Batch (owl-base.Owl_optimise_generic_sig.Sig.Batch)

Module Sig.Batch

Batch module

type typ =
  1. | Full
  2. | Mini of int
  3. | Sample of int
  4. | Stochastic

Types of batches.

val run : typ -> Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t

Execute the computations defined in module typ.

val batches : typ -> Algodiff.t -> int

Return the total number of batches given a batch typ.
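
A short sketch of how batch strategies are inspected, assuming the double-precision optimiser instance exposed by the full owl package (written Owl.Optimise.D below; the exact module path is an assumption):

  module AD = Owl.Algodiff.D
  module O  = Owl.Optimise.D   (* assumed concrete instance of this signature *)

  let () =
    let x = AD.pack_arr (Owl.Mat.uniform 1000 10) in
    let b = O.Batch.Mini 64 in
    Printf.printf "%s -> %d batches\n" (O.Batch.to_string b) (O.Batch.batches b x)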

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Checkpoint/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Checkpoint/index.html deleted file mode 100644 index 76c6d9746..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Checkpoint/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Checkpoint (owl-base.Owl_optimise_generic_sig.Sig.Checkpoint)

Module Sig.Checkpoint

Checkpoint module

type state = {
  1. mutable current_batch : int;
  2. mutable batches_per_epoch : int;
  3. mutable epochs : float;
  4. mutable batches : int;
  5. mutable loss : Algodiff.t array;
  6. mutable start_at : float;
  7. mutable stop : bool;
  8. mutable gs : Algodiff.t array array;
  9. mutable ps : Algodiff.t array array;
  10. mutable us : Algodiff.t array array;
  11. mutable ch : Algodiff.t array array array;
}

Type definition of checkpoint

type typ =
  1. | Batch of int
  2. | Epoch of float
  3. | Custom of state -> unit
  4. | None

Types of checkpoint.

val init_state : int -> float -> state

init_state batches_per_epoch epochs initialises a state by specifying the number of batches per epoch and the number of epochs in total.
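
A minimal sketch, under the same assumption that Owl.Optimise.D is a concrete instance of this signature:

  module O = Owl.Optimise.D

  let () =
    (* 100 batches per epoch, 50 epochs in total *)
    let state = O.Checkpoint.init_state 100 50. in
    O.Checkpoint.print_state_info state;
    O.Checkpoint.print_summary state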

val default_checkpoint_fun : (string -> 'a) -> 'a

This function is used for saving intermediate files during optimisation.

val print_state_info : state -> unit

Print out the detailed information of the current state.

val print_summary : state -> unit

Print out the summary of the current state.

val run : typ -> (string -> unit) -> int -> Algodiff.t -> state -> unit

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Clipping/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Clipping/index.html deleted file mode 100644 index 4bde3e912..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Clipping/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Clipping (owl-base.Owl_optimise_generic_sig.Sig.Clipping)

Module Sig.Clipping

Clipping module

type typ =
  1. | L2norm of float
  2. | Value of float * float
  3. | None

Types of clipping functions.

val run : typ -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.
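
A sketch of gradient clipping, assuming Owl.Algodiff.D and Owl.Optimise.D as the concrete instances (module paths are assumptions):

  module AD = Owl.Algodiff.D
  module O  = Owl.Optimise.D

  let () =
    let g  = AD.pack_arr (Owl.Mat.gaussian 1 10) in
    let g' = O.Clipping.run (O.Clipping.L2norm 5.) g in
    Printf.printf "clipped norm = %g\n" (AD.unpack_flt AD.Maths.(l2norm' g'))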

val default : typ -> typ

Create module typ with default values.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Gradient/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Gradient/index.html deleted file mode 100644 index bda2a183f..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Gradient/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -Gradient (owl-base.Owl_optimise_generic_sig.Sig.Gradient)

Module Sig.Gradient

Gradient module

type typ =
  1. | GD
  2. | CG
  3. | CD
  4. | NonlinearCG
  5. | DaiYuanCG
  6. | NewtonCG
  7. | Newton

Types of gradient functions.

val run : - typ -> - (Algodiff.t -> Algodiff.t) -> - Algodiff.t -> - Algodiff.t -> - Algodiff.t -> - Algodiff.t -> - Algodiff.t

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Learning_Rate/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Learning_Rate/index.html deleted file mode 100644 index 1eae2fe53..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Learning_Rate/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Learning_Rate (owl-base.Owl_optimise_generic_sig.Sig.Learning_Rate)

Module Sig.Learning_Rate

Strategies for learning rate update

type typ =
  1. | Adagrad of float
  2. | Const of float
  3. | Decay of float * float
  4. | Exp_decay of float * float
  5. | RMSprop of float * float
  6. | Adam of float * float * float
  7. | Schedule of float array

Representation of learning rate update strategies. Possible values include:

  • Adam (alpha, beta1, beta2); see the original Adam paper for the meaning of the parameters.
val run : typ -> int -> Algodiff.t -> Algodiff.t array -> Algodiff.t

Execute the computations defined in module typ.
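
A small sketch of constructing learning-rate strategies, assuming Owl.Optimise.D as the concrete instance:

  module O = Owl.Optimise.D

  let lr_adam  = O.Learning_Rate.Adam (0.001, 0.9, 0.999)
  let lr_decay = O.Learning_Rate.Exp_decay (0.1, 0.99)

  let () =
    print_endline (O.Learning_Rate.to_string lr_adam);
    print_endline (O.Learning_Rate.to_string lr_decay)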

val default : typ -> typ

Create module typ with default values.

val update_ch : typ -> Algodiff.t -> Algodiff.t array -> Algodiff.t array

Update the cache of gradients.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Loss/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Loss/index.html deleted file mode 100644 index 81303e857..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Loss/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Loss (owl-base.Owl_optimise_generic_sig.Sig.Loss)

Module Sig.Loss

Loss module

type typ =
  1. | Hinge
  2. | L1norm
  3. | L2norm
  4. | Quadratic
  5. | Cross_entropy
  6. | Custom of Algodiff.t -> Algodiff.t -> Algodiff.t

Types of loss functions.
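
Besides the built-in losses, Custom wraps any Algodiff.t -> Algodiff.t -> Algodiff.t function. A hypothetical mean-absolute-error loss, assuming Owl.Algodiff.D / Owl.Optimise.D as the concrete instances:

  module AD = Owl.Algodiff.D
  module O  = Owl.Optimise.D

  let mae = O.Loss.Custom (fun y y' -> AD.Maths.(abs (y - y') |> sum'))

  let () =
    let y  = AD.pack_arr (Owl.Mat.zeros 1 5) in
    let y' = AD.pack_arr (Owl.Mat.ones 1 5) in
    Printf.printf "loss = %g\n" (AD.unpack_flt (O.Loss.run mae y y'))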

val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Momentum/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Momentum/index.html deleted file mode 100644 index c20cee57a..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Momentum/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Momentum (owl-base.Owl_optimise_generic_sig.Sig.Momentum)

Module Sig.Momentum

Momentum module

type typ =
  1. | Standard of float
  2. | Nesterov of float
  3. | None

Types of momentum functions.

val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Params/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Params/index.html deleted file mode 100644 index e0f339dea..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Params/index.html +++ /dev/null @@ -1,14 +0,0 @@ - -Params (owl-base.Owl_optimise_generic_sig.Sig.Params)

Module Sig.Params

Params module

type typ = {
  1. mutable epochs : float;
  2. mutable batch : Batch.typ;
  3. mutable gradient : Gradient.typ;
  4. mutable loss : Loss.typ;
  5. mutable learning_rate : Learning_Rate.typ;
  6. mutable regularisation : Regularisation.typ;
  7. mutable momentum : Momentum.typ;
  8. mutable clipping : Clipping.typ;
  9. mutable stopping : Stopping.typ;
  10. mutable checkpoint : Checkpoint.typ;
  11. mutable verbosity : bool;
}

Type definition of parameter.

val default : unit -> typ

Create module typ with default values.

val config : - ?batch:Batch.typ -> - ?gradient:Gradient.typ -> - ?loss:Loss.typ -> - ?learning_rate:Learning_Rate.typ -> - ?regularisation:Regularisation.typ -> - ?momentum:Momentum.typ -> - ?clipping:Clipping.typ -> - ?stopping:Stopping.typ -> - ?checkpoint:Checkpoint.typ -> - ?verbosity:bool -> - float -> - typ

This function creates a parameter object with many configurations.
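
All the labels are optional and the trailing float is the number of epochs. A configuration sketch, assuming Owl.Optimise.D as the concrete instance (the hyper-parameter values are illustrative only):

  module O = Owl.Optimise.D

  let params =
    O.Params.config
      ~batch:(O.Batch.Mini 32)
      ~learning_rate:(O.Learning_Rate.Adagrad 0.005)
      ~loss:O.Loss.Quadratic
      ~stopping:(O.Stopping.Const 1e-12)
      ~verbosity:true
      50.                                (* epochs *)

  let () = print_endline (O.Params.to_string params)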

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Regularisation/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Regularisation/index.html deleted file mode 100644 index 17b9f18ba..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Regularisation/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Regularisation (owl-base.Owl_optimise_generic_sig.Sig.Regularisation)

Module Sig.Regularisation

Regularisation module

type typ =
  1. | L1norm of float
  2. | L2norm of float
  3. | Elastic_net of float * float
  4. | None

Types of regularisation functions.

val run : typ -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Stopping/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Stopping/index.html deleted file mode 100644 index 51be6006d..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Stopping/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Stopping (owl-base.Owl_optimise_generic_sig.Sig.Stopping)

Module Sig.Stopping

Stopping module

type typ =
  1. | Const of float
  2. | Early of int * int
  3. | None

Types of stopping functions.

val run : typ -> float -> bool

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Utils/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/Utils/index.html deleted file mode 100644 index 5089bd5b4..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/Utils/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Utils (owl-base.Owl_optimise_generic_sig.Sig.Utils)

Module Sig.Utils

Utils module

val sample_num : Algodiff.t -> int

Return the total number of samples in the passed-in ndarray.

val draw_samples : Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t

draw_samples x y draws samples from both x (observations) and y (labels). The samples will be drawn along axis 0, so x and y must agree along axis 0.

val get_chunk : - Algodiff.t -> - Algodiff.t -> - int -> - int -> - Algodiff.t * Algodiff.t

get_chunk x y i c gets a contiguous chunk of c samples starting at position i from x (observations) and y (labels).
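
A sketch of the sampling helpers on a toy dataset, assuming Owl.Algodiff.D / Owl.Optimise.D as the concrete instances:

  module AD = Owl.Algodiff.D
  module O  = Owl.Optimise.D

  let () =
    let x = AD.pack_arr (Owl.Mat.uniform 100 4) in   (* observations *)
    let y = AD.pack_arr (Owl.Mat.uniform 100 1) in   (* labels       *)
    Printf.printf "samples = %d\n" (O.Utils.sample_num x);
    let xs, ys = O.Utils.draw_samples x y 16 in      (* 16 random rows        *)
    let xc, yc = O.Utils.get_chunk x y 10 16 in      (* 16 rows from row 10   *)
    ignore (xs, ys); ignore (xc, yc)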

\ No newline at end of file diff --git a/owl-base/Owl_optimise_generic_sig/module-type-Sig/index.html b/owl-base/Owl_optimise_generic_sig/module-type-Sig/index.html deleted file mode 100644 index 9367c4860..000000000 --- a/owl-base/Owl_optimise_generic_sig/module-type-Sig/index.html +++ /dev/null @@ -1,31 +0,0 @@ - -Sig (owl-base.Owl_optimise_generic_sig.Sig)

Module type Owl_optimise_generic_sig.Sig

module Utils : sig ... end

Utils module

module Learning_Rate : sig ... end

Strategies for learning rate update

module Batch : sig ... end

Batch module

module Loss : sig ... end

Loss module

module Gradient : sig ... end

Gradient module

module Momentum : sig ... end

Momentum module

module Regularisation : sig ... end

Regularisation module

module Clipping : sig ... end

Clipping module

module Stopping : sig ... end

Stopping module

module Checkpoint : sig ... end

Checkpoint module

module Params : sig ... end

Params module

Core functions
val minimise_weight : - ?state:Checkpoint.state -> - Params.typ -> - (Algodiff.t -> Algodiff.t -> Algodiff.t) -> - Algodiff.t -> - Algodiff.t -> - Algodiff.t -> - Checkpoint.state * Algodiff.t

This function minimises the weight w of the passed-in function f, as shown in the sketch below.

  • f is a function f : w -> x -> y.
  • w is a row vector, but y can have any shape.
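
A rough sketch fitting a linear model with minimise_weight, assuming Owl.Algodiff.D and Owl.Optimise.D as the concrete instances (module paths and hyper-parameters are assumptions):

  module AD = Owl.Algodiff.D
  module O  = Owl.Optimise.D

  let () =
    (* synthetic data: y = x *@ transpose w_true *)
    let x = Owl.Mat.uniform 100 3 in
    let w_true = Owl.Mat.of_array [| 1.; -2.; 0.5 |] 1 3 in
    let y = Owl.Mat.(x *@ transpose w_true) in
    (* model: f w x, with w a row vector as required above *)
    let f w x = AD.Maths.(x *@ transpose w) in
    let params = O.Params.config ~loss:O.Loss.Quadratic 100. in
    let _state, w' =
      O.minimise_weight params f
        (AD.pack_arr (Owl.Mat.zeros 1 3))
        (AD.pack_arr x) (AD.pack_arr y)
    in
    Owl.Mat.print (AD.unpack_arr w')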

val minimise_network : - ?state:Checkpoint.state -> - Params.typ -> - (Algodiff.t -> Algodiff.t * Algodiff.t array array) -> - (Algodiff.t -> Algodiff.t array array * Algodiff.t array array) -> - (Algodiff.t array array -> unit) -> - (string -> unit) -> - Algodiff.t -> - Algodiff.t -> - Checkpoint.state

This function is specifically designed for minimising the weights in a neural network of graph structure. In Owl's earlier versions, the functions in the regression module were actually implemented using this function.

val minimise_fun : - ?state:Checkpoint.state -> - Params.typ -> - (Algodiff.t -> Algodiff.t) -> - Algodiff.t -> - Checkpoint.state * Algodiff.t

This function minimises f : x -> y w.r.t. x.

x is an ndarray, and y is a scalar value.
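
A minimal sketch minimising a simple quadratic with minimise_fun, under the same Owl.Algodiff.D / Owl.Optimise.D assumptions as above:

  module AD = Owl.Algodiff.D
  module O  = Owl.Optimise.D

  let () =
    (* g x = || x - 1 ||^2, minimised over a 1 x 5 ndarray *)
    let one = AD.pack_flt 1. in
    let g x = AD.Maths.(l2norm_sqr' (x - one)) in
    let params =
      O.Params.config ~learning_rate:(O.Learning_Rate.Const 0.1) 100. in
    let _state, x' = O.minimise_fun params g (AD.pack_arr (Owl.Mat.uniform 1 5)) in
    Owl.Mat.print (AD.unpack_arr x')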

val minimise_compiled_network : - ?state:Checkpoint.state -> - Params.typ -> - (Algodiff.t -> Algodiff.t -> Algodiff.t) -> - (unit -> unit) -> - (string -> unit) -> - Algodiff.t -> - Algodiff.t -> - Checkpoint.state

TODO

\ No newline at end of file diff --git a/owl-base/Owl_pretty/.dummy b/owl-base/Owl_pretty/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_types/.dummy b/owl-base/Owl_types/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_types/module-type-Computation_Device/A/Linalg/index.html b/owl-base/Owl_types/module-type-Computation_Device/A/Linalg/index.html deleted file mode 100644 index 668823841..000000000 --- a/owl-base/Owl_types/module-type-Computation_Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_types.Computation_Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : - ?solver:[ `default | `bilinear | `direct ] -> - arr -> - arr -> - arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_types/module-type-Computation_Device/A/Mat/index.html b/owl-base/Owl_types/module-type-Computation_Device/A/Mat/index.html deleted file mode 100644 index 31f713f00..000000000 --- a/owl-base/Owl_types/module-type-Computation_Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_types.Computation_Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_types/module-type-Computation_Device/A/Scalar/index.html b/owl-base/Owl_types/module-type-Computation_Device/A/Scalar/index.html deleted file mode 100644 index bc2be6c04..000000000 --- a/owl-base/Owl_types/module-type-Computation_Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_types.Computation_Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_types/module-type-Computation_Device/A/index.html b/owl-base/Owl_types/module-type-Computation_Device/A/index.html deleted file mode 100644 index aedf59e09..000000000 --- a/owl-base/Owl_types/module-type-Computation_Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_types.Computation_Device.A)

Module Computation_Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : - ?max_row:int -> - ?max_col:int -> - ?header:bool -> - ?fmt:(elt -> string) -> - arr -> - unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val dilated_conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val dilated_conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val dilated_conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val transpose_conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val transpose_conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val transpose_conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val max_pool1d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val max_pool2d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val max_pool3d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool1d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool2d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool3d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv1d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv2d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv2d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv3d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv3d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val max_pool2d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val max_pool3d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool1d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool2d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool3d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : - ?transa:bool -> - ?transb:bool -> - ?alpha:elt -> - ?beta:elt -> - c:arr -> - arr -> - arr -> - unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val conv2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val conv3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val dilated_conv1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - unit
val dilated_conv2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - unit
val dilated_conv3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - unit
val transpose_conv1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val transpose_conv2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val transpose_conv3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - unit
val max_pool1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val max_pool2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val max_pool3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val avg_pool1d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val avg_pool2d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val avg_pool3d_ : - out:arr -> - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv1d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv2d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv2d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv3d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val dilated_conv3d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - int array -> - arr -> - unit
val transpose_conv1d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv1d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv2d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv2d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv3d_backward_input_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val transpose_conv3d_backward_kernel_ : - out:arr -> - arr -> - arr -> - int array -> - arr -> - unit
val max_pool1d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val max_pool2d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val max_pool3d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val avg_pool1d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val avg_pool2d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val avg_pool3d_backward_ : - out:arr -> - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
\ No newline at end of file diff --git a/owl-base/Owl_types/module-type-Computation_Device/index.html b/owl-base/Owl_types/module-type-Computation_Device/index.html deleted file mode 100644 index 4bacd2607..000000000 --- a/owl-base/Owl_types/module-type-Computation_Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Computation_Device (owl-base.Owl_types.Computation_Device)

Module type Owl_types.Computation_Device

include Owl_types_computation_device.Sig
Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO

\ No newline at end of file diff --git a/owl-base/Owl_types/module-type-Ndarray_Algodiff/Linalg/index.html b/owl-base/Owl_types/module-type-Ndarray_Algodiff/Linalg/index.html deleted file mode 100644 index 6682334e7..000000000 --- a/owl-base/Owl_types/module-type-Ndarray_Algodiff/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_types.Ndarray_Algodiff.Linalg)

Module Ndarray_Algodiff.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : - ?solver:[ `default | `bilinear | `direct ] -> - arr -> - arr -> - arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_types/module-type-Ndarray_Algodiff/Mat/index.html b/owl-base/Owl_types/module-type-Ndarray_Algodiff/Mat/index.html deleted file mode 100644 index 8d1252a55..000000000 --- a/owl-base/Owl_types/module-type-Ndarray_Algodiff/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_types.Ndarray_Algodiff.Mat)

Module Ndarray_Algodiff.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_types/module-type-Ndarray_Algodiff/Scalar/index.html b/owl-base/Owl_types/module-type-Ndarray_Algodiff/Scalar/index.html deleted file mode 100644 index 77f96bb7d..000000000 --- a/owl-base/Owl_types/module-type-Ndarray_Algodiff/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_types.Ndarray_Algodiff.Scalar)

Module Ndarray_Algodiff.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_types/module-type-Ndarray_Algodiff/index.html b/owl-base/Owl_types/module-type-Ndarray_Algodiff/index.html deleted file mode 100644 index c4ba30153..000000000 --- a/owl-base/Owl_types/module-type-Ndarray_Algodiff/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -Ndarray_Algodiff (owl-base.Owl_types.Ndarray_Algodiff)

Module type Owl_types.Ndarray_Algodiff

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : - ?max_row:int -> - ?max_col:int -> - ?header:bool -> - ?fmt:(elt -> string) -> - arr -> - unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val dilated_conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val dilated_conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val dilated_conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val transpose_conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val transpose_conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val transpose_conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val max_pool1d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val max_pool2d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val max_pool3d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool1d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool2d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool3d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv1d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv2d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv2d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv3d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv3d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val max_pool2d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val max_pool3d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool1d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool2d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool3d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_types/module-type-Ndarray_Basic/index.html b/owl-base/Owl_types/module-type-Ndarray_Basic/index.html deleted file mode 100644 index 5a7dfd6cf..000000000 --- a/owl-base/Owl_types/module-type-Ndarray_Basic/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -Ndarray_Basic (owl-base.Owl_types.Ndarray_Basic)

Module type Owl_types.Ndarray_Basic

include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : - ?max_row:int -> - ?max_col:int -> - ?header:bool -> - ?fmt:(elt -> string) -> - arr -> - unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
\ No newline at end of file diff --git a/owl-base/Owl_types/module-type-Ndarray_Compare/index.html b/owl-base/Owl_types/module-type-Ndarray_Compare/index.html deleted file mode 100644 index 39918e2c5..000000000 --- a/owl-base/Owl_types/module-type-Ndarray_Compare/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -Ndarray_Compare (owl-base.Owl_types.Ndarray_Compare)

Module type Owl_types.Ndarray_Compare

include Owl_types_ndarray_compare.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
val is_zero : arr -> bool
val is_positive : arr -> bool
val is_negative : arr -> bool
val is_nonpositive : arr -> bool
val is_nonnegative : arr -> bool
val equal : arr -> arr -> bool
val not_equal : arr -> arr -> bool
val less : arr -> arr -> bool
val greater : arr -> arr -> bool
val less_equal : arr -> arr -> bool
val greater_equal : arr -> arr -> bool
val approx_equal : ?eps:float -> arr -> arr -> bool
val approx_equal_scalar : ?eps:float -> arr -> elt -> bool
val approx_elt_equal : ?eps:float -> arr -> arr -> arr
val approx_elt_equal_scalar : ?eps:float -> arr -> elt -> arr
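Since Ndarray_Compare only adds element-wise and whole-array comparisons on top of the basic signature, a short functor sketch is enough to show the intended shape of use. Everything below sticks to names in the signature above; the 2x3 shape, the 0.5 threshold, and the epsilon are arbitrary illustrative choices.

(* sketch: works for any module N matching Owl_types.Ndarray_Compare *)
module Compare_demo (N : Owl_types.Ndarray_Compare) = struct
  let run () =
    let x = N.uniform [| 2; 3 |] in
    let y = N.copy x in
    (* 0/1 mask marking entries strictly greater than 0.5 *)
    let mask = N.elt_greater_scalar x (N.float_to_elt 0.5) in
    (* whole-array predicates return plain booleans *)
    Printf.printf "x = y exactly: %b\n" (N.equal x y);
    Printf.printf "x ~ y (eps 1e-9): %b\n" (N.approx_equal ~eps:1e-9 x y);
    Printf.printf "mask non-negative: %b\n" (N.is_nonnegative mask)
end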
\ No newline at end of file diff --git a/owl-base/Owl_types/module-type-Ndarray_Mutable/Linalg/index.html b/owl-base/Owl_types/module-type-Ndarray_Mutable/Linalg/index.html deleted file mode 100644 index 1e131b792..000000000 --- a/owl-base/Owl_types/module-type-Ndarray_Mutable/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_types.Ndarray_Mutable.Linalg)

Module Ndarray_Mutable.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
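The Linalg sub-signature is small enough that one functor sketch covers the common path: solve or factorise, then check the residual with the ndarray operations from the parent signature. The 4x4 shape and the residual check are illustrative choices, not part of the interface itself.

(* sketch: solve a linear system via the Linalg sub-module of any
   N : Owl_types.Ndarray_Mutable *)
module Linalg_demo (N : Owl_types.Ndarray_Mutable) = struct
  let run () =
    let a = N.uniform [| 4; 4 |] in
    let b = N.uniform [| 4; 1 |] in
    (* x such that a * x = b *)
    let x = N.Linalg.linsolve a b in
    (* residual ||a x - b|| should be close to zero *)
    let r = N.sub (N.dot a x) b in
    Printf.printf "residual = %g\n" (N.elt_to_float (N.l2norm' r));
    (* QR factorisation from the same sub-module *)
    let _q, _r = N.Linalg.qr a in
    ()
end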
\ No newline at end of file diff --git a/owl-base/Owl_types/module-type-Ndarray_Mutable/Mat/index.html b/owl-base/Owl_types/module-type-Ndarray_Mutable/Mat/index.html deleted file mode 100644 index 27f0da29a..000000000 --- a/owl-base/Owl_types/module-type-Ndarray_Mutable/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_types.Ndarray_Mutable.Mat)

Module Ndarray_Mutable.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
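The Mat helpers compose directly with the parent signature; a tiny functor sketch (same caveats as above, shapes are illustrative) shows how.

module Mat_demo (N : Owl_types.Ndarray_Mutable) = struct
  let run () =
    (* identity plus the strict upper triangle of a random matrix *)
    let id = N.Mat.eye 4 in
    let u = N.Mat.triu ~k:1 (N.uniform [| 4; 4 |]) in
    N.print (N.add id u)
end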
\ No newline at end of file diff --git a/owl-base/Owl_types/module-type-Ndarray_Mutable/Scalar/index.html b/owl-base/Owl_types/module-type-Ndarray_Mutable/Scalar/index.html deleted file mode 100644 index cde4849ab..000000000 --- a/owl-base/Owl_types/module-type-Ndarray_Mutable/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_types.Ndarray_Mutable.Scalar)

Module Ndarray_Mutable.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_types/module-type-Ndarray_Mutable/index.html b/owl-base/Owl_types/module-type-Ndarray_Mutable/index.html deleted file mode 100644 index e7529fed9..000000000 --- a/owl-base/Owl_types/module-type-Ndarray_Mutable/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -Ndarray_Mutable (owl-base.Owl_types.Ndarray_Mutable)

Module type Owl_types.Ndarray_Mutable

include Owl_types_ndarray_mutable.Sig
include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
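What distinguishes Ndarray_Mutable from the read-only signatures is the trailing-underscore family, where the destination buffer is passed explicitly. The sketch below allocates the output once and reuses it; passing ~out at every call (rather than relying on any default destination) is a deliberate choice here, since the signature alone only guarantees the labelled-argument form. Shapes and the scaling constant are illustrative.

(* sketch: in-place pipeline for any N : Owl_types.Ndarray_Mutable *)
module Mutable_demo (N : Owl_types.Ndarray_Mutable) = struct
  let run () =
    let x = N.uniform [| 2; 3 |] in
    let y = N.ones [| 2; 3 |] in
    let out = N.empty [| 2; 3 |] in
    (* out <- sigmoid (0.5 * (x + y)), with no fresh allocations after empty *)
    N.add_ ~out x y;
    N.mul_scalar_ ~out out (N.float_to_elt 0.5);
    N.sigmoid_ ~out out;
    N.print out
end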
\ No newline at end of file diff --git a/owl-base/Owl_types/module-type-Ndarray_Numdiff/index.html b/owl-base/Owl_types/module-type-Ndarray_Numdiff/index.html deleted file mode 100644 index af0786677..000000000 --- a/owl-base/Owl_types/module-type-Ndarray_Numdiff/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -Ndarray_Numdiff (owl-base.Owl_types.Ndarray_Numdiff)

Module type Owl_types.Ndarray_Numdiff

include Owl_types_ndarray_numdiff.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val mapi : (int -> elt -> elt) -> arr -> arr
val (+) : arr -> arr -> arr
val (-) : arr -> arr -> arr
val (*) : arr -> arr -> arr
val (/) : arr -> arr -> arr
val (+$) : arr -> elt -> arr
val (-$) : arr -> elt -> arr
val (*$) : arr -> elt -> arr
val (/$) : arr -> elt -> arr
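Ndarray_Numdiff adds mapi and the infix arithmetic operators on top of the basic signature, which is what numerical differentiation needs. The sketch below only exercises those additions; the polynomial and the shape are made up for illustration.

(* sketch: the operators and mapi from Owl_types.Ndarray_Numdiff *)
module Numdiff_demo (N : Owl_types.Ndarray_Numdiff) = struct
  open N
  let run () =
    let x = sequential [| 5 |] in
    (* y = x * x + 1, written with the infix operators from the signature *)
    let y = (x * x) +$ float_to_elt 1. in
    (* mapi exposes the flat index together with each element *)
    let z = mapi (fun i e -> float_to_elt (elt_to_float e +. float_of_int i)) y in
    Printf.printf "sum = %g\n" (elt_to_float (sum' z))
end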
\ No newline at end of file diff --git a/owl-base/Owl_types/module-type-Stats_Dist/Linalg/index.html b/owl-base/Owl_types/module-type-Stats_Dist/Linalg/index.html deleted file mode 100644 index 76824ea65..000000000 --- a/owl-base/Owl_types/module-type-Stats_Dist/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_types.Stats_Dist.Linalg)

Module Stats_Dist.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_types/module-type-Stats_Dist/Mat/index.html b/owl-base/Owl_types/module-type-Stats_Dist/Mat/index.html deleted file mode 100644 index 76620900c..000000000 --- a/owl-base/Owl_types/module-type-Stats_Dist/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_types.Stats_Dist.Mat)

Module Stats_Dist.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_types/module-type-Stats_Dist/Scalar/index.html b/owl-base/Owl_types/module-type-Stats_Dist/Scalar/index.html deleted file mode 100644 index 27896bf1b..000000000 --- a/owl-base/Owl_types/module-type-Stats_Dist/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_types.Stats_Dist.Scalar)

Module Stats_Dist.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_types/module-type-Stats_Dist/index.html b/owl-base/Owl_types/module-type-Stats_Dist/index.html deleted file mode 100644 index abf40b810..000000000 --- a/owl-base/Owl_types/module-type-Stats_Dist/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -Stats_Dist (owl-base.Owl_types.Stats_Dist)

Module type Owl_types.Stats_Dist

include Owl_types_stats_dist.Sig
include Owl_types_ndarray_mutable.Sig
include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
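
The primed reductions above (sum', prod', mean', var', std') collapse the whole array to a single elt, while the unprimed ones take ?axis and ?keep_dims and return an arr. A short sketch of why keep_dims matters, assuming (as in Owl's dense ndarrays) that the binary operators broadcast a size-1 axis:

module Standardise_sketch (N : sig
  type arr
  val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
  val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
  val sub : arr -> arr -> arr
  val div : arr -> arr -> arr
end) = struct
  (* keep_dims:true retains the reduced axis with size 1, so mu and sd
     broadcast back against x when subtracting and dividing. *)
  let standardise ~axis x =
    let mu = N.mean ~axis ~keep_dims:true x in
    let sd = N.std ~axis ~keep_dims:true x in
    N.div (N.sub x mu) sd
end
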
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
val uniform_rvs : a:arr -> b:arr -> n:int -> arr
val uniform_pdf : a:arr -> b:arr -> arr -> arr
val uniform_logpdf : a:arr -> b:arr -> arr -> arr
val uniform_cdf : a:arr -> b:arr -> arr -> arr
val uniform_logcdf : a:arr -> b:arr -> arr -> arr
val uniform_ppf : a:arr -> b:arr -> arr -> arr
val uniform_sf : a:arr -> b:arr -> arr -> arr
val uniform_logsf : a:arr -> b:arr -> arr -> arr
val uniform_isf : a:arr -> b:arr -> arr -> arr
val gaussian_rvs : mu:arr -> sigma:arr -> n:int -> arr
val gaussian_pdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_logpdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_cdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_logcdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_ppf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_sf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_logsf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_isf : mu:arr -> sigma:arr -> arr -> arr
val exponential_rvs : lambda:arr -> n:int -> arr
val exponential_pdf : lambda:arr -> arr -> arr
val exponential_logpdf : lambda:arr -> arr -> arr
val exponential_cdf : lambda:arr -> arr -> arr
val exponential_logcdf : lambda:arr -> arr -> arr
val exponential_ppf : lambda:arr -> arr -> arr
val exponential_sf : lambda:arr -> arr -> arr
val exponential_logsf : lambda:arr -> arr -> arr
val exponential_isf : lambda:arr -> arr -> arr
val poisson_rvs : mu:arr -> n:int -> arr
val gamma_rvs : shape:arr -> scale:arr -> n:int -> arr
val gamma_pdf : shape:arr -> scale:arr -> arr -> arr
val gamma_logpdf : shape:arr -> scale:arr -> arr -> arr
val gamma_cdf : shape:arr -> scale:arr -> arr -> arr
val gamma_logcdf : shape:arr -> scale:arr -> arr -> arr
val gamma_ppf : shape:arr -> scale:arr -> arr -> arr
val gamma_sf : shape:arr -> scale:arr -> arr -> arr
val gamma_logsf : shape:arr -> scale:arr -> arr -> arr
val gamma_isf : shape:arr -> scale:arr -> arr -> arr
val beta_rvs : a:arr -> b:arr -> n:int -> arr
val beta_pdf : a:arr -> b:arr -> arr -> arr
val beta_logpdf : a:arr -> b:arr -> arr -> arr
val beta_cdf : a:arr -> b:arr -> arr -> arr
val beta_logcdf : a:arr -> b:arr -> arr -> arr
val beta_ppf : a:arr -> b:arr -> arr -> arr
val beta_sf : a:arr -> b:arr -> arr -> arr
val beta_logsf : a:arr -> b:arr -> arr -> arr
val beta_isf : a:arr -> b:arr -> arr -> arr
val chi2_rvs : df:arr -> n:int -> arr
val chi2_pdf : df:arr -> arr -> arr
val chi2_logpdf : df:arr -> arr -> arr
val chi2_cdf : df:arr -> arr -> arr
val chi2_logcdf : df:arr -> arr -> arr
val chi2_ppf : df:arr -> arr -> arr
val chi2_sf : df:arr -> arr -> arr
val chi2_logsf : df:arr -> arr -> arr
val chi2_isf : df:arr -> arr -> arr
val f_rvs : dfnum:arr -> dfden:arr -> n:int -> arr
val f_pdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_logpdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_cdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_logcdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_ppf : dfnum:arr -> dfden:arr -> arr -> arr
val f_sf : dfnum:arr -> dfden:arr -> arr -> arr
val f_logsf : dfnum:arr -> dfden:arr -> arr -> arr
val f_isf : dfnum:arr -> dfden:arr -> arr -> arr
val cauchy_rvs : loc:arr -> scale:arr -> n:int -> arr
val cauchy_pdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_logpdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_cdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_logcdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_ppf : loc:arr -> scale:arr -> arr -> arr
val cauchy_sf : loc:arr -> scale:arr -> arr -> arr
val cauchy_logsf : loc:arr -> scale:arr -> arr -> arr
val cauchy_isf : loc:arr -> scale:arr -> arr -> arr
val lomax_rvs : shape:arr -> scale:arr -> n:int -> arr
val lomax_pdf : shape:arr -> scale:arr -> arr -> arr
val lomax_logpdf : shape:arr -> scale:arr -> arr -> arr
val lomax_cdf : shape:arr -> scale:arr -> arr -> arr
val lomax_logcdf : shape:arr -> scale:arr -> arr -> arr
val lomax_ppf : shape:arr -> scale:arr -> arr -> arr
val lomax_sf : shape:arr -> scale:arr -> arr -> arr
val lomax_logsf : shape:arr -> scale:arr -> arr -> arr
val lomax_isf : shape:arr -> scale:arr -> arr -> arr
val weibull_rvs : shape:arr -> scale:arr -> n:int -> arr
val weibull_pdf : shape:arr -> scale:arr -> arr -> arr
val weibull_logpdf : shape:arr -> scale:arr -> arr -> arr
val weibull_cdf : shape:arr -> scale:arr -> arr -> arr
val weibull_logcdf : shape:arr -> scale:arr -> arr -> arr
val weibull_ppf : shape:arr -> scale:arr -> arr -> arr
val weibull_sf : shape:arr -> scale:arr -> arr -> arr
val weibull_logsf : shape:arr -> scale:arr -> arr -> arr
val weibull_isf : shape:arr -> scale:arr -> arr -> arr
val laplace_rvs : loc:arr -> scale:arr -> n:int -> arr
val laplace_pdf : loc:arr -> scale:arr -> arr -> arr
val laplace_logpdf : loc:arr -> scale:arr -> arr -> arr
val laplace_cdf : loc:arr -> scale:arr -> arr -> arr
val laplace_logcdf : loc:arr -> scale:arr -> arr -> arr
val laplace_ppf : loc:arr -> scale:arr -> arr -> arr
val laplace_sf : loc:arr -> scale:arr -> arr -> arr
val laplace_logsf : loc:arr -> scale:arr -> arr -> arr
val laplace_isf : loc:arr -> scale:arr -> arr -> arr
val gumbel1_rvs : a:arr -> b:arr -> n:int -> arr
val gumbel1_pdf : a:arr -> b:arr -> arr -> arr
val gumbel1_logpdf : a:arr -> b:arr -> arr -> arr
val gumbel1_cdf : a:arr -> b:arr -> arr -> arr
val gumbel1_logcdf : a:arr -> b:arr -> arr -> arr
val gumbel1_ppf : a:arr -> b:arr -> arr -> arr
val gumbel1_sf : a:arr -> b:arr -> arr -> arr
val gumbel1_logsf : a:arr -> b:arr -> arr -> arr
val gumbel1_isf : a:arr -> b:arr -> arr -> arr
val gumbel2_rvs : a:arr -> b:arr -> n:int -> arr
val gumbel2_pdf : a:arr -> b:arr -> arr -> arr
val gumbel2_logpdf : a:arr -> b:arr -> arr -> arr
val gumbel2_cdf : a:arr -> b:arr -> arr -> arr
val gumbel2_logcdf : a:arr -> b:arr -> arr -> arr
val gumbel2_ppf : a:arr -> b:arr -> arr -> arr
val gumbel2_sf : a:arr -> b:arr -> arr -> arr
val gumbel2_logsf : a:arr -> b:arr -> arr -> arr
val gumbel2_isf : a:arr -> b:arr -> arr -> arr
val logistic_rvs : loc:arr -> scale:arr -> n:int -> arr
val logistic_pdf : loc:arr -> scale:arr -> arr -> arr
val logistic_logpdf : loc:arr -> scale:arr -> arr -> arr
val logistic_cdf : loc:arr -> scale:arr -> arr -> arr
val logistic_logcdf : loc:arr -> scale:arr -> arr -> arr
val logistic_ppf : loc:arr -> scale:arr -> arr -> arr
val logistic_sf : loc:arr -> scale:arr -> arr -> arr
val logistic_logsf : loc:arr -> scale:arr -> arr -> arr
val logistic_isf : loc:arr -> scale:arr -> arr -> arr
val lognormal_rvs : mu:arr -> sigma:arr -> n:int -> arr
val lognormal_pdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_logpdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_cdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_logcdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_ppf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_sf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_logsf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_isf : mu:arr -> sigma:arr -> arr -> arr
val rayleigh_rvs : sigma:arr -> n:int -> arr
val rayleigh_pdf : sigma:arr -> arr -> arr
val rayleigh_logpdf : sigma:arr -> arr -> arr
val rayleigh_cdf : sigma:arr -> arr -> arr
val rayleigh_logcdf : sigma:arr -> arr -> arr
val rayleigh_ppf : sigma:arr -> arr -> arr
val rayleigh_sf : sigma:arr -> arr -> arr
val rayleigh_logsf : sigma:arr -> arr -> arr
val rayleigh_isf : sigma:arr -> arr -> arr
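
Every distribution above follows the same nine-function pattern: _rvs draws samples, _pdf/_logpdf evaluate the density, _cdf/_logcdf the distribution function, _ppf its inverse, _sf/_logsf the survival function and _isf its inverse, with the distribution parameters passed as arrays so they can vary element-wise. A hedged sketch using only the listed gaussian_* functions (the functor is illustrative):

module Gaussian_sketch (N : sig
  type arr
  val gaussian_rvs : mu:arr -> sigma:arr -> n:int -> arr
  val gaussian_pdf : mu:arr -> sigma:arr -> arr -> arr
  val gaussian_cdf : mu:arr -> sigma:arr -> arr -> arr
end) = struct
  (* Draw n samples from N(mu, sigma) and score them under the same
     element-wise parameters. *)
  let sample_and_score ~mu ~sigma ~n =
    let xs = N.gaussian_rvs ~mu ~sigma ~n in
    (xs, N.gaussian_pdf ~mu ~sigma xs, N.gaussian_cdf ~mu ~sigma xs)
end
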
\ No newline at end of file diff --git a/owl-base/Owl_types_common/.dummy b/owl-base/Owl_types_common/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_types_computation_device/.dummy b/owl-base/Owl_types_computation_device/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_types_computation_device/module-type-Sig/A/Linalg/index.html b/owl-base/Owl_types_computation_device/module-type-Sig/A/Linalg/index.html deleted file mode 100644 index 8b507cbd1..000000000 --- a/owl-base/Owl_types_computation_device/module-type-Sig/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_types_computation_device.Sig.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
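
A minimal sketch of the Linalg sub-module in use, assuming (as in Owl's dense Linalg) that `n marks a general coefficient matrix while `u/`l mark it as upper/lower triangular so a cheaper solve can be chosen; the functor is illustrative:

module Linsolve_sketch (N : sig
  type arr
  module Linalg : sig
    val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
  end
end) = struct
  (* Solve a x = b for x; the structure hint defaults to a general matrix. *)
  let solve ?(structure = `n) a b = N.Linalg.linsolve ~typ:structure a b
end
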
\ No newline at end of file diff --git a/owl-base/Owl_types_computation_device/module-type-Sig/A/Mat/index.html b/owl-base/Owl_types_computation_device/module-type-Sig/A/Mat/index.html deleted file mode 100644 index d14d56ca3..000000000 --- a/owl-base/Owl_types_computation_device/module-type-Sig/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_types_computation_device.Sig.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
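
Mat bundles a few matrix constructors and views: eye n is the n-by-n identity, and triu/tril keep the upper/lower triangle relative to diagonal k. A tiny illustrative sketch (the functor name is made up):

module Mat_sketch (N : sig
  type arr
  val add : arr -> arr -> arr
  module Mat : sig
    val eye : int -> arr
    val tril : ?k:int -> arr -> arr
  end
end) = struct
  (* Add the identity to a square n-by-n matrix and keep its lower triangle;
     k = 0 keeps the main diagonal. *)
  let lower_with_identity a n = N.Mat.tril ~k:0 (N.add a (N.Mat.eye n))
end
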
\ No newline at end of file diff --git a/owl-base/Owl_types_computation_device/module-type-Sig/A/Scalar/index.html b/owl-base/Owl_types_computation_device/module-type-Sig/A/Scalar/index.html deleted file mode 100644 index aa9d30e6f..000000000 --- a/owl-base/Owl_types_computation_device/module-type-Sig/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_types_computation_device.Sig.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
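
Scalar mirrors a subset of the array-level operations at the elt level, which is mainly useful when composing with map from the enclosing module. A small sketch, assuming nothing beyond the two signatures listed here:

module Scalar_sketch (N : sig
  type arr
  type elt
  val map : (elt -> elt) -> arr -> arr
  module Scalar : sig
    val mul : elt -> elt -> elt
    val sigmoid : elt -> elt
  end
end) = struct
  (* Element-wise scaled sigmoid built from Scalar primitives; the array-level
     sigmoid would normally be preferred, this only shows how elt-level
     helpers compose with map. *)
  let scaled_sigmoid scale x =
    N.map (fun e -> N.Scalar.mul scale (N.Scalar.sigmoid e)) x
end
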
\ No newline at end of file diff --git a/owl-base/Owl_types_computation_device/module-type-Sig/A/index.html b/owl-base/Owl_types_computation_device/module-type-Sig/A/index.html deleted file mode 100644 index c5d349f7a..000000000 --- a/owl-base/Owl_types_computation_device/module-type-Sig/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_types_computation_device.Sig.A)

Module Sig.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
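
get_slice/set_slice take an int list list slice specification with one inner list per axis, while get_fancy/set_fancy accept the richer Owl_types_common.index list. A hedged sketch, assuming Owl's usual convention that [] selects a whole axis and [a; b] an inclusive index range:

module Slice_sketch (N : sig
  type arr
  val get_slice : int list list -> arr -> arr
  val set_slice : int list list -> arr -> arr -> unit
end) = struct
  (* Copy the first three rows (all columns) of src into dst. *)
  let copy_head_rows ~src ~dst =
    let head = N.get_slice [ [ 0; 2 ]; [] ] src in
    N.set_slice [ [ 0; 2 ]; [] ] dst head
end
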
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
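
The convolution and pooling family above (plain, dilated and transpose convolutions, the pooling operators, and their backward passes) shares one vocabulary: an input array, a kernel, stride (and dilation) arrays, and an optional ?padding of type Owl_types_common.padding. A hedged sketch of one forward stage, assuming the padding type is the usual SAME | VALID variant and the NHWC input / [|kh; kw; in_ch; out_ch|] kernel layout used by Owl's dense ndarrays:

module Conv_sketch (N : sig
  type arr
  val conv2d :
    ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
  val max_pool2d :
    ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
end) = struct
  (* Stride-1 SAME convolution followed by 2x2 max pooling with stride 2. *)
  let stage input kernel =
    let y = N.conv2d ~padding:Owl_types_common.SAME input kernel [| 1; 1 |] in
    N.max_pool2d ~padding:Owl_types_common.VALID y [| 2; 2 |] [| 2; 2 |]
end
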
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
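
of_array/of_arrays build arrays from plain OCaml values, with float_to_elt/elt_to_float bridging floats and the abstract elt; together with dot, transpose and trace that is enough for small matrix checks. A minimal sketch:

module Build_sketch (N : sig
  type arr
  type elt
  val of_arrays : elt array array -> arr
  val float_to_elt : float -> elt
  val elt_to_float : elt -> float
  val transpose : ?axis:int array -> arr -> arr
  val dot : arr -> arr -> arr
  val trace : arr -> elt
end) = struct
  (* Build a matrix from float rows and return trace (A^T A) as a float. *)
  let gram_trace (rows : float array array) =
    let a = N.of_arrays (Array.map (Array.map N.float_to_elt) rows) in
    N.elt_to_float (N.trace (N.dot (N.transpose a) a))
end
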
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
\ No newline at end of file diff --git a/owl-base/Owl_types_computation_device/module-type-Sig/index.html b/owl-base/Owl_types_computation_device/module-type-Sig/index.html deleted file mode 100644 index bbad4dbba..000000000 --- a/owl-base/Owl_types_computation_device/module-type-Sig/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Sig (owl-base.Owl_types_computation_device.Sig)

Module type Owl_types_computation_device.Sig

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO
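
Taken together, these core functions move data across the host/device boundary: arr_to_value/value_to_arr and elt_to_value/value_to_elt convert in each direction, while is_arr/is_elt query what a value currently holds. A hedged sketch of dispatching on a value (the functor and its parameter signature are illustrative, written against only the functions listed above):

module Value_sketch (D : sig
  module A : sig
    type arr
    type elt
  end
  type value
  val value_to_arr : value -> A.arr
  val value_to_elt : value -> A.elt
  val is_arr : value -> bool
  val is_elt : value -> bool
end) = struct
  (* Bring a device value back to host data, dispatching on its kind. *)
  let to_host ~on_arr ~on_elt v =
    if D.is_arr v then on_arr (D.value_to_arr v)
    else if D.is_elt v then on_elt (D.value_to_elt v)
    else invalid_arg "to_host: value is neither an arr nor an elt"
end
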

\ No newline at end of file diff --git a/owl-base/Owl_types_computation_engine/.dummy b/owl-base/Owl_types_computation_engine/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Linalg/index.html b/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Linalg/index.html deleted file mode 100644 index 41f5aeaaf..000000000 --- a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Linalg/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Linalg (owl-base.Owl_types_computation_engine.Sig.Graph.Optimiser.Operator.Linalg)

Module Operator.Linalg

\ No newline at end of file diff --git a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Mat/index.html b/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Mat/index.html deleted file mode 100644 index 2ecb0b451..000000000 --- a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_types_computation_engine.Sig.Graph.Optimiser.Operator.Mat)

Module Operator.Mat

val eye : int -> Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

\ No newline at end of file diff --git a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Scalar/index.html b/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Scalar/index.html deleted file mode 100644 index 21ddff802..000000000 --- a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Scalar/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -Scalar (owl-base.Owl_types_computation_engine.Sig.Graph.Optimiser.Operator.Scalar)

Module Operator.Scalar

\ No newline at end of file diff --git a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html b/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html deleted file mode 100644 index f3a4234d9..000000000 --- a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_types_computation_engine.Sig.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html b/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html deleted file mode 100644 index fb1f3647f..000000000 --- a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_types_computation_engine.Sig.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html b/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html deleted file mode 100644 index e868415c3..000000000 --- a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_types_computation_engine.Sig.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html b/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html deleted file mode 100644 index 1c4b06030..000000000 --- a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl-base.Owl_types_computation_engine.Sig.Graph.Optimiser.Operator.Symbol.Shape.Type.Device.A)

Module Device.A

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
\ No newline at end of file diff --git a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html b/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html deleted file mode 100644 index 35b16f588..000000000 --- a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/Device/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Device (owl-base.Owl_types_computation_engine.Sig.Graph.Optimiser.Operator.Symbol.Shape.Type.Device)

Module Type.Device

Type definition
type device

TODO

type value

TODO

Core functions
val make_device : unit -> device

TODO

val arr_to_value : A.arr -> value

TODO

val value_to_arr : value -> A.arr

TODO

val elt_to_value : A.elt -> value

TODO

val value_to_elt : value -> A.elt

TODO

val value_to_float : value -> float

TODO

val is_arr : value -> bool

TODO

val is_elt : value -> bool

TODO

\ No newline at end of file diff --git a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/index.html b/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/index.html deleted file mode 100644 index 9d3ef58e6..000000000 --- a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Symbol/Shape/Type/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Type (owl-base.Owl_types_computation_engine.Sig.Graph.Optimiser.Operator.Symbol.Shape.Type)

Module Shape.Type

Type definition
type state =
  1. | Valid
  2. | Invalid
    (*

    TODO

    *)

TODO

and block = {
  1. size : int;
  2. block_id : int;
  3. mutable active : t option;
  4. mutable memory : Device.value;
  5. mutable nodes : t list;
}

The block type keeps a reference to a block of memory and to the nodes sharing that block. A rough, hypothetical helper that summarises a block is sketched below.
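
As a rough, hypothetical illustration (not part of the documented interface), such a summary only needs the record fields listed above:

    let describe_block (b : block) =
      (* report id, size and how many nodes currently share this block *)
      Printf.sprintf "block %d: size %d, %d node(s), active node %s"
        b.block_id b.size (List.length b.nodes)
        (match b.active with Some _ -> "set" | None -> "none")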

and attr = {
  1. mutable op : op;
  2. mutable freeze : bool;
  3. mutable reuse : bool;
  4. mutable state : state;
  5. mutable shape : int array option array;
  6. mutable value : Device.value array;
  7. mutable block : block array option;
}

TODO

and arr =
  1. | Arr of t
and elt =
  1. | Elt of t
and op =
  1. | Noop
  2. | Var
  3. | Const
  4. | Empty of int array
  5. | Zeros of int array
  6. | Ones of int array
  7. | Create of int array
  8. | Sequential of int array
  9. | Uniform of int array
  10. | Gaussian of int array
  11. | Bernoulli of int array
  12. | Init of int array * (int -> elt)
  13. | Get of int array
  14. | Set of int array
  15. | GetSlice of int list list
  16. | SetSlice of int list list
  17. | GetFancy of Owl_types_common.index list
  18. | SetFancy of Owl_types_common.index list
  19. | Copy
  20. | Reset
  21. | Reshape of int array
  22. | Reverse
  23. | Tile of int array
  24. | Repeat of int array
  25. | Pad of elt * int list list
  26. | Concatenate of int
  27. | Stack of int
  28. | Split of int * int array
  29. | Draw of int * int
  30. | Map of (elt -> elt)
  31. | Fold of int * (elt -> elt -> elt)
  32. | Scan of int * (elt -> elt -> elt)
  33. | OneHot of int
  34. | OfArray of int array
  35. | Delay of (Device.A.arr -> Device.A.arr)
  36. | DelayArray of int array * (Device.A.arr array -> Device.A.arr)
  37. | LazyPrint of int option * int option * bool option * (Device.A.elt -> string) option
  38. | Abs
  39. | Neg
  40. | Floor
  41. | Ceil
  42. | Round
  43. | Sqr
  44. | Sqrt
  45. | Log
  46. | Log2
  47. | Log10
  48. | Exp
  49. | Sin
  50. | Cos
  51. | Tan
  52. | Sinh
  53. | Cosh
  54. | Tanh
  55. | Asin
  56. | Acos
  57. | Atan
  58. | Asinh
  59. | Acosh
  60. | Atanh
  61. | Min of bool * int
  62. | Max of bool * int
  63. | Sum of bool * int
  64. | SumReduce of int array
  65. | Signum
  66. | Sigmoid
  67. | Relu
  68. | Dawsn
  69. | Min'
  70. | Max'
  71. | Sum'
  72. | LogSumExp'
  73. | LogSumExp of bool * int
  74. | L1norm'
  75. | L2norm'
  76. | L2NormSqr'
  77. | ClipByValue
  78. | ClipByL2norm
  79. | Pow
  80. | ScalarPow
  81. | PowScalar
  82. | Atan2
  83. | ScalarAtan2
  84. | Atan2Scalar
  85. | Hypot
  86. | Min2
  87. | Max2
  88. | Add
  89. | Sub
  90. | Mul
  91. | Div
  92. | AddScalar
  93. | SubScalar
  94. | MulScalar
  95. | DivScalar
  96. | ScalarAdd
  97. | ScalarSub
  98. | ScalarMul
  99. | ScalarDiv
  100. | FMA
  101. | EltEqual
  102. | EltNotEqual
  103. | EltLess
  104. | EltGreater
  105. | EltLessEqual
  106. | EltGreaterEqual
  107. | EltEqualScalar
  108. | EltNotEqualScalar
  109. | EltLessScalar
  110. | EltGreaterScalar
  111. | EltLessEqualScalar
  112. | EltGreaterEqualScalar
  113. | Conv1d of Owl_types_common.padding * int array
  114. | Conv2d of Owl_types_common.padding * int array
  115. | Conv3d of Owl_types_common.padding * int array
  116. | TransposeConv1d of Owl_types_common.padding * int array
  117. | TransposeConv2d of Owl_types_common.padding * int array
  118. | TransposeConv3d of Owl_types_common.padding * int array
  119. | DilatedConv1d of Owl_types_common.padding * int array * int array
  120. | DilatedConv2d of Owl_types_common.padding * int array * int array
  121. | DilatedConv3d of Owl_types_common.padding * int array * int array
  122. | MaxPool1d of Owl_types_common.padding * int array * int array
  123. | MaxPool2d of Owl_types_common.padding * int array * int array
  124. | MaxPool3d of Owl_types_common.padding * int array * int array
  125. | AvgPool1d of Owl_types_common.padding * int array * int array
  126. | AvgPool2d of Owl_types_common.padding * int array * int array
  127. | AvgPool3d of Owl_types_common.padding * int array * int array
  128. | UpSampling2d of int array
  129. | Conv1dBackwardInput of int array
  130. | Conv1dBackwardKernel of int array
  131. | Conv2dBackwardInput of int array
  132. | Conv2dBackwardKernel of int array
  133. | Conv3dBackwardInput of int array
  134. | Conv3dBackwardKernel of int array
  135. | TransposeConv1dBackwardInput of int array
  136. | TransposeConv1dBackwardKernel of int array
  137. | TransposeConv2dBackwardInput of int array
  138. | TransposeConv2dBackwardKernel of int array
  139. | TransposeConv3dBackwardInput of int array
  140. | TransposeConv3dBackwardKernel of int array
  141. | DilatedConv1dBackwardInput of int array * int array
  142. | DilatedConv1dBackwardKernel of int array * int array
  143. | DilatedConv2dBackwardInput of int array * int array
  144. | DilatedConv2dBackwardKernel of int array * int array
  145. | DilatedConv3dBackwardInput of int array * int array
  146. | DilatedConv3dBackwardKernel of int array * int array
  147. | MaxPool1dBackward of Owl_types_common.padding * int array * int array
  148. | MaxPool2dBackward of Owl_types_common.padding * int array * int array
  149. | MaxPool3dBackward of Owl_types_common.padding * int array * int array
  150. | AvgPool1dBackward of Owl_types_common.padding * int array * int array
  151. | AvgPool2dBackward of Owl_types_common.padding * int array * int array
  152. | AvgPool3dBackward of Owl_types_common.padding * int array * int array
  153. | UpSampling2dBackward of int array
  154. | RowNum
  155. | ColNum
  156. | Row
  157. | Rows of int array
  158. | CopyRowTo
  159. | CopyColTo
  160. | Dot of bool * bool * elt * elt
  161. | Inv
  162. | Trace
  163. | Transpose of int array
  164. | ToRows
  165. | OfRows
  166. | Scalar_Add
  167. | Scalar_Sub
  168. | Scalar_Mul
  169. | Scalar_Div
  170. | Scalar_Pow
  171. | Scalar_Atan2
  172. | Scalar_Abs
  173. | Scalar_Neg
  174. | Scalar_Sqr
  175. | Scalar_Sqrt
  176. | Scalar_Exp
  177. | Scalar_Log
  178. | Scalar_Log2
  179. | Scalar_Log10
  180. | Scalar_Signum
  181. | Scalar_Floor
  182. | Scalar_Ceil
  183. | Scalar_Round
  184. | Scalar_Sin
  185. | Scalar_Cos
  186. | Scalar_Tan
  187. | Scalar_Sinh
  188. | Scalar_Cosh
  189. | Scalar_Tanh
  190. | Scalar_Asin
  191. | Scalar_Acos
  192. | Scalar_Atan
  193. | Scalar_Asinh
  194. | Scalar_Acosh
  195. | Scalar_Atanh
  196. | Scalar_Relu
  197. | Scalar_Dawsn
  198. | Scalar_Sigmoid
  199. | Fused_Adagrad of float * float
    (*

    TODO

    *)
\ No newline at end of file diff --git a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Symbol/Shape/index.html b/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Symbol/Shape/index.html deleted file mode 100644 index d00339b98..000000000 --- a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Symbol/Shape/index.html +++ /dev/null @@ -1,5 +0,0 @@ - -Shape (owl-base.Owl_types_computation_engine.Sig.Graph.Optimiser.Operator.Symbol.Shape)

Module Symbol.Shape

Core functions
val infer_shape : Type.op -> Type.attr Owl_graph.node array -> int array option array

TODO

\ No newline at end of file diff --git a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Symbol/index.html b/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Symbol/index.html deleted file mode 100644 index 9609270bf..000000000 --- a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/Symbol/index.html +++ /dev/null @@ -1,28 +0,0 @@ - -Symbol (owl-base.Owl_types_computation_engine.Sig.Graph.Optimiser.Operator.Symbol)

Module Operator.Symbol

Core functions
val op_to_str : Shape.Type.op -> string

TODO

val is_random_variable : Shape.Type.op -> bool

TODO

val refnum : 'a Owl_graph.node -> int

TODO

val node_shape : Shape.Type.attr Owl_graph.node -> int array

TODO

val node_numel : Shape.Type.attr Owl_graph.node -> int

TODO

val is_shape_unknown : Shape.Type.attr Owl_graph.node -> bool

TODO

val infer_shape_graph : Shape.Type.attr Owl_graph.node array -> unit

TODO

val shape_to_str : int array option array -> string

TODO

val node_to_str : Shape.Type.attr Owl_graph.node -> string

TODO

val node_to_arr : Shape.Type.t -> Shape.Type.arr

TODO

val arr_to_node : Shape.Type.arr -> Shape.Type.t

TODO

val node_to_elt : Shape.Type.t -> Shape.Type.elt

TODO

val elt_to_node : Shape.Type.elt -> Shape.Type.t

TODO

val make_node : ?name:string -> ?value:Shape.Type.Device.value array -> ?shape:int array option array -> ?freeze:bool -> ?reuse:bool -> ?state:Shape.Type.state -> Shape.Type.op -> Shape.Type.attr Owl_graph.node

TODO

val make_then_connect : ?shape:int array option array -> Shape.Type.op -> Shape.Type.attr Owl_graph.node array -> Shape.Type.attr Owl_graph.node

TODO

val var_arr : ?shape:int array -> string -> Shape.Type.arr

TODO

val var_elt : string -> Shape.Type.elt

TODO

val const_arr : string -> Shape.Type.Device.A.arr -> Shape.Type.arr

TODO

val const_elt : string -> Shape.Type.Device.A.elt -> Shape.Type.elt

TODO

val new_block_id : unit -> int

new_block_id () returns an unused block id.

val make_empty_block : ?block_id:int -> int -> Shape.Type.block

make_empty_block s returns an empty block of memory of size s.

val make_value_block : Shape.Type.Device.value -> Shape.Type.attr Owl_graph.node -> unit

make_value_block value node creates a block of memory initialised with value and links the new block to node.

get_block node returns the memory block allocated to node. If no block is allocated, throws an exception.

val add_node_to_block : Shape.Type.attr Owl_graph.node -> Shape.Type.block -> unit

Link a node to a reusable block and initialise its memory within the memory of the block.

val get_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node option

Return the node that is currently using the memory of the block.

val set_active_node : Shape.Type.block -> Shape.Type.attr Owl_graph.node -> unit

Update the node that is currently using the block of memory.

val get_block_id : Shape.Type.attr Owl_graph.node -> int

get_block_id node returns the id of the block assigned to node. If node has not been assigned yet, returns -1.
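
A minimal, hedged sketch of how these allocation helpers might be combined (the binding below is illustrative only and is not taken from the library sources):

    let attach_fresh_block n =
      (* size a fresh block to the node n and link the node to it *)
      let blk = make_empty_block (node_numel n) in
      add_node_to_block n blk;
      (* once linked, the node should report this block's id instead of -1 *)
      get_block_id n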

val set_value : Shape.Type.attr Owl_graph.node -> Shape.Type.Device.value array -> unit

TODO

val set_operator : Shape.Type.attr Owl_graph.node -> Shape.Type.op -> unit

TODO

TODO

val set_reuse : Shape.Type.attr Owl_graph.node -> bool -> unit

TODO

val get_reuse : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_shared : Shape.Type.attr Owl_graph.node -> bool

TODO

val get_shared_nodes : Shape.Type.attr Owl_graph.node -> Shape.Type.attr Owl_graph.node array

get_shared_nodes node returns the nodes sharing the same block of memory as node.
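
For example (a hedged sketch using node_to_str from above; the helper name is hypothetical):

    let print_shared n =
      (* render every node that shares n's memory block *)
      Array.iter (fun m -> print_endline (node_to_str m)) (get_shared_nodes n)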

val is_var : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_const : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_arr : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_node_elt : Shape.Type.attr Owl_graph.node -> bool

TODO

val is_assigned : Shape.Type.attr Owl_graph.node -> bool

is_assigned node checks if a block of memory has been assigned to node.

val check_assigned : Shape.Type.attr Owl_graph.node -> unit

check_assigned node throws an exception if node has not been assigned to a block.
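
A small, hedged illustration of how the two checks might be combined (the helper name is hypothetical):

    let block_id_opt n =
      (* return None instead of raising when no block has been assigned *)
      if is_assigned n then Some (get_block_id n) else None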

val is_valid : Shape.Type.attr Owl_graph.node -> bool

TODO

val validate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate : Shape.Type.attr Owl_graph.node -> unit

TODO

val invalidate_graph : Shape.Type.attr Owl_graph.node -> unit

TODO

val is_freeze : Shape.Type.attr Owl_graph.node -> bool

TODO

val freeze : Shape.Type.attr Owl_graph.node -> unit

TODO

val freeze_descendants : Shape.Type.attr Owl_graph.node array -> unit

TODO

val freeze_ancestors : Shape.Type.attr Owl_graph.node array -> unit

TODO

TODO

TODO

val unsafe_assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_arr : Shape.Type.arr -> Shape.Type.Device.A.arr -> unit

TODO

val assign_elt : Shape.Type.elt -> Shape.Type.Device.A.elt -> unit

TODO

val float_to_elt : float -> Shape.Type.elt

TODO

val elt_to_float : Shape.Type.elt -> float

TODO

\ No newline at end of file diff --git a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/index.html b/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/index.html deleted file mode 100644 index 1044c2a1d..000000000 --- a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/Operator/index.html +++ /dev/null @@ -1,420 +0,0 @@ - -Operator (owl-base.Owl_types_computation_engine.Sig.Graph.Optimiser.Operator)

Module Optimiser.Operator

Vectorised functions
val empty : int array -> Symbol.Shape.Type.arr

TODO

val zeros : int array -> Symbol.Shape.Type.arr

TODO

val ones : int array -> Symbol.Shape.Type.arr

TODO

val create : int array -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val sequential : ?a:Symbol.Shape.Type.elt -> ?step:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val uniform : ?a:Symbol.Shape.Type.elt -> ?b:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val gaussian : ?mu:Symbol.Shape.Type.elt -> ?sigma:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val bernoulli : ?p:Symbol.Shape.Type.elt -> int array -> Symbol.Shape.Type.arr

TODO

val init : int array -> (int -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr

TODO

val init_nd : int array -> (int array -> Symbol.Shape.Type.elt) -> Symbol.Shape.Type.arr

TODO

val shape : Symbol.Shape.Type.arr -> int array

TODO

val numel : Symbol.Shape.Type.arr -> int

TODO

TODO

val set : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.elt -> unit

TODO

val get_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val set_slice : int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit

TODO

TODO

val set_fancy : Owl_types.index list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> unit

TODO

val copy_ : out:'a -> 'b -> 'c

TODO

val reset : Symbol.Shape.Type.arr -> unit

TODO

val reshape : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val tile : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val repeat : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val pad : ?v:Symbol.Shape.Type.elt -> int list list -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val expand : ?hi:bool -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr

TODO

val squeeze : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val concatenate : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr

TODO

val stack : ?axis:int -> Symbol.Shape.Type.arr array -> Symbol.Shape.Type.arr

TODO

TODO

val split : ?axis:int -> 'a -> 'b -> 'c

TODO

val draw : ?axis:int -> Symbol.Shape.Type.arr -> int -> Symbol.Shape.Type.arr * 'a array

TODO

TODO

delay f x returns f x. It makes it possible to use a function that is not tracked by the computation graph and to delay its evaluation. The output must have the same shape as the input.

delay_array out_shape f x works in the same way as delay but is applied to an array of ndarrays. It needs the shape of the output as an argument. A hedged usage sketch follows.
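
The val declarations for delay and delay_array are not rendered on this page, so the sketch below assumes the conventional signatures suggested by the Delay and DelayArray constructors above; the helper names are hypothetical.

    let lazy_copy x =
      (* wrap an untracked Ndarray function; the result keeps x's shape *)
      delay (fun a -> Symbol.Shape.Type.Device.A.copy a) x

    let pick_first x y =
      (* the output shape of the delayed array function must be supplied *)
      delay_array (shape x) (fun inputs -> inputs.(0)) [| x; y |]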

val lazy_print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(Symbol.Shape.Type.Device.A.elt -> string) -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

lazy_print x prints the output of x when it is evaluated. It is implemented as an identity node. For information about the optional parameters, refer to the print function of the Ndarray module.
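
A brief usage sketch based only on the signature above (the helper name is hypothetical):

    let debug_node x =
      (* identity node: the result stands in for x in the rest of the graph,
         and x is printed when the graph is evaluated *)
      lazy_print ~max_row:10 ~max_col:10 ~header:true x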

val print : ?max_row:'a -> ?max_col:'b -> ?header:'c -> ?fmt:'d -> 'e -> unit

TODO

val min : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val sum : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val sum_reduce : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val log_sum_exp : ?axis:int -> ?keep_dims:bool -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

TODO

TODO

TODO

TODO

val elt_less_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val elt_greater_equal_scalar : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt -> Symbol.Shape.Type.arr

TODO

val conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val max_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool1d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool2d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val avg_pool3d : ?padding:Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr

TODO

val upsampling2d : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val transpose_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv1d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv2d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_input : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val dilated_conv3d_backward_kernel : Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val max_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool1d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool2d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val avg_pool3d_backward : Owl_types.padding -> Symbol.Shape.Type.arr -> int array -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val upsampling2d_backward : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val row_num : Symbol.Shape.Type.arr -> int

TODO

val col_num : Symbol.Shape.Type.arr -> int

TODO

val rows : Symbol.Shape.Type.arr -> int array -> Symbol.Shape.Type.arr

TODO

val copy_row_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

val copy_col_to : Symbol.Shape.Type.arr -> 'a -> 'b -> unit

TODO

TODO

val transpose : ?axis:int array -> Symbol.Shape.Type.arr -> Symbol.Shape.Type.arr

TODO

val to_rows : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val to_cols : Symbol.Shape.Type.arr -> 'a array

TODO

TODO

val of_array : Symbol.Shape.Type.elt array -> int array -> Symbol.Shape.Type.arr

TODO

val of_arrays : Symbol.Shape.Type.elt array array -> Symbol.Shape.Type.arr

TODO

val to_arrays : Symbol.Shape.Type.arr -> Symbol.Shape.Type.elt array array

TODO

Scalar functions
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/index.html b/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/index.html deleted file mode 100644 index 6b427ca07..000000000 --- a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/Optimiser/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Optimiser (owl-base.Owl_types_computation_engine.Sig.Graph.Optimiser)

Module Graph.Optimiser

Core functions
val estimate_complexity : 'a Owl_graph.node array -> int * int

TODO

val optimise_nodes : Operator.Symbol.Shape.Type.attr Owl_graph.node array -> unit

TODO

\ No newline at end of file diff --git a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/index.html b/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/index.html deleted file mode 100644 index 6689edad0..000000000 --- a/owl-base/Owl_types_computation_engine/module-type-Sig/Graph/index.html +++ /dev/null @@ -1,33 +0,0 @@ - -Graph (owl-base.Owl_types_computation_engine.Sig.Graph)

Module Sig.Graph

Type definition
type graph

TODO

Core functions
val shape_or_value : Optimiser.Operator.Symbol.Shape.Type.t -> string

TODO

val graph_to_dot : graph -> string

TODO

val graph_to_trace : graph -> string

TODO

val save_graph : 'a -> string -> unit

TODO

val load_graph : string -> 'a * 'b

TODO

val invalidate_rvs : graph -> unit

TODO

val is_iopair_safe : 'a Owl_graph.node -> 'a Owl_graph.node -> bool

TODO

val update_iopair : graph -> unit

TODO

val remove_unused_iopair : 'a Owl_graph.node array -> 'b array -> 'a Owl_graph.node array * 'b array

TODO

val optimise : graph -> unit

TODO

\ No newline at end of file diff --git a/owl-base/Owl_types_computation_engine/module-type-Sig/index.html b/owl-base/Owl_types_computation_engine/module-type-Sig/index.html deleted file mode 100644 index 19093e65d..000000000 --- a/owl-base/Owl_types_computation_engine/module-type-Sig/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Sig (owl-base.Owl_types_computation_engine.Sig)

Module type Owl_types_computation_engine.Sig

Core evaluation functions of the engine

TODO

TODO

val eval_graph : Graph.graph -> unit

TODO

\ No newline at end of file diff --git a/owl-base/Owl_types_maths_basic/.dummy b/owl-base/Owl_types_maths_basic/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_types_maths_basic/module-type-Sig/index.html b/owl-base/Owl_types_maths_basic/module-type-Sig/index.html deleted file mode 100644 index 90ddd5cff..000000000 --- a/owl-base/Owl_types_maths_basic/module-type-Sig/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Sig (owl-base.Owl_types_maths_basic.Sig)

Module type Owl_types_maths_basic.Sig

type elt
val add : elt -> elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_types_ndarray_algodiff/.dummy b/owl-base/Owl_types_ndarray_algodiff/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_types_ndarray_algodiff/module-type-Sig/Linalg/index.html b/owl-base/Owl_types_ndarray_algodiff/module-type-Sig/Linalg/index.html deleted file mode 100644 index 06e921cd2..000000000 --- a/owl-base/Owl_types_ndarray_algodiff/module-type-Sig/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_types_ndarray_algodiff.Sig.Linalg)

Module Sig.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_types_ndarray_algodiff/module-type-Sig/Mat/index.html b/owl-base/Owl_types_ndarray_algodiff/module-type-Sig/Mat/index.html deleted file mode 100644 index 70e9acdfd..000000000 --- a/owl-base/Owl_types_ndarray_algodiff/module-type-Sig/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_types_ndarray_algodiff.Sig.Mat)

Module Sig.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_types_ndarray_algodiff/module-type-Sig/Scalar/index.html b/owl-base/Owl_types_ndarray_algodiff/module-type-Sig/Scalar/index.html deleted file mode 100644 index cb2331eed..000000000 --- a/owl-base/Owl_types_ndarray_algodiff/module-type-Sig/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_types_ndarray_algodiff.Sig.Scalar)

Module Sig.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_types_ndarray_algodiff/module-type-Sig/index.html b/owl-base/Owl_types_ndarray_algodiff/module-type-Sig/index.html deleted file mode 100644 index 9ec8470b9..000000000 --- a/owl-base/Owl_types_ndarray_algodiff/module-type-Sig/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -Sig (owl-base.Owl_types_ndarray_algodiff.Sig)

Module type Owl_types_ndarray_algodiff.Sig

include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl-base/Owl_types_ndarray_basic/.dummy b/owl-base/Owl_types_ndarray_basic/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_types_ndarray_basic/module-type-Sig/index.html b/owl-base/Owl_types_ndarray_basic/module-type-Sig/index.html deleted file mode 100644 index cf83db0d2..000000000 --- a/owl-base/Owl_types_ndarray_basic/module-type-Sig/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -Sig (owl-base.Owl_types_ndarray_basic.Sig)

Module type Owl_types_ndarray_basic.Sig

type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
\ No newline at end of file diff --git a/owl-base/Owl_types_ndarray_compare/.dummy b/owl-base/Owl_types_ndarray_compare/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_types_ndarray_compare/module-type-Sig/index.html b/owl-base/Owl_types_ndarray_compare/module-type-Sig/index.html deleted file mode 100644 index 3a71d3862..000000000 --- a/owl-base/Owl_types_ndarray_compare/module-type-Sig/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -Sig (owl-base.Owl_types_ndarray_compare.Sig)

Module type Owl_types_ndarray_compare.Sig

include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
val is_zero : arr -> bool
val is_positive : arr -> bool
val is_negative : arr -> bool
val is_nonpositive : arr -> bool
val is_nonnegative : arr -> bool
val equal : arr -> arr -> bool
val not_equal : arr -> arr -> bool
val less : arr -> arr -> bool
val greater : arr -> arr -> bool
val less_equal : arr -> arr -> bool
val greater_equal : arr -> arr -> bool
val approx_equal : ?eps:float -> arr -> arr -> bool
val approx_equal_scalar : ?eps:float -> arr -> elt -> bool
val approx_elt_equal : ?eps:float -> arr -> arr -> arr
val approx_elt_equal_scalar : ?eps:float -> arr -> elt -> arr
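The listing above mixes three families of comparisons: element-wise elt_* functions that return an array-shaped 0/1 mask, whole-array predicates (equal, less, ...) that return a single bool, and approx_* variants that take an ?eps tolerance. The sketch below contrasts them; CMP is a hypothetical, narrowed view of the signature written out here only so the example is self-contained, and is not a module type from the library.

(* Hypothetical narrowed interface, mirroring a few entries of the listing. *)
module type CMP = sig
  type arr
  val elt_less     : arr -> arr -> arr            (* element-wise 0/1 mask  *)
  val less         : arr -> arr -> bool           (* whole-array predicate  *)
  val approx_equal : ?eps:float -> arr -> arr -> bool
end

module Compare (C : CMP) = struct
  (* True iff every element of [a] is strictly below the matching element of [b]. *)
  let strictly_below a b = C.less a b

  (* Element-wise mask: 1 where a.(i) < b.(i), 0 elsewhere. *)
  let below_mask a b = C.elt_less a b

  (* Equality up to a fixed floating-point tolerance. *)
  let roughly_equal a b = C.approx_equal ~eps:1e-6 a b
end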
\ No newline at end of file diff --git a/owl-base/Owl_types_ndarray_eltcmp/.dummy b/owl-base/Owl_types_ndarray_eltcmp/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_types_ndarray_eltcmp/module-type-Sig/index.html b/owl-base/Owl_types_ndarray_eltcmp/module-type-Sig/index.html deleted file mode 100644 index 8f002d2ac..000000000 --- a/owl-base/Owl_types_ndarray_eltcmp/module-type-Sig/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -Sig (owl-base.Owl_types_ndarray_eltcmp.Sig)

Module type Owl_types_ndarray_eltcmp.Sig

include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
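A short sketch against the interface above, assuming the owl-base package is on the load path so that Owl_types_ndarray_eltcmp.Sig resolves by name; any module satisfying the signature could be supplied to the functor. It also assumes Owl's slice-list convention where an empty list selects a full axis and [start; stop] selects an inclusive range.

module Eltcmp_demo (N : Owl_types_ndarray_eltcmp.Sig) = struct
  (* Build a 3x4 array whose entry (i, j) is i + j, take its first two
     columns, and return the sum of that slice as a float. *)
  let demo () =
    let a =
      N.init_nd [| 3; 4 |] (fun idx ->
          N.float_to_elt (float_of_int (idx.(0) + idx.(1))))
    in
    let first_two_cols = N.get_slice [ []; [ 0; 1 ] ] a in
    N.elt_to_float (N.sum' first_two_cols)
end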
\ No newline at end of file diff --git a/owl-base/Owl_types_ndarray_mutable/.dummy b/owl-base/Owl_types_ndarray_mutable/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_types_ndarray_mutable/module-type-Sig/Linalg/index.html b/owl-base/Owl_types_ndarray_mutable/module-type-Sig/Linalg/index.html deleted file mode 100644 index 208e5c108..000000000 --- a/owl-base/Owl_types_ndarray_mutable/module-type-Sig/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_types_ndarray_mutable.Sig.Linalg)

Module Sig.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
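Sketch only: LINALG below is a hypothetical cut-down version of the sub-signature above, written out so the example is self-contained. It shows the general solve driver and the ?typ polymorphic-variant argument used to request a triangular solve.

module type LINALG = sig
  type arr
  val qr : arr -> arr * arr
  val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
end

module Solve (L : LINALG) = struct
  (* General solve of A x = b. *)
  let solve a b = L.linsolve a b

  (* Back-substitution against an upper-triangular system R x = b. *)
  let solve_upper r b = L.linsolve ~typ:`u r b

  (* Orthogonal factor of A's QR decomposition. *)
  let orthogonal_factor a = fst (L.qr a)
end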
\ No newline at end of file diff --git a/owl-base/Owl_types_ndarray_mutable/module-type-Sig/Mat/index.html b/owl-base/Owl_types_ndarray_mutable/module-type-Sig/Mat/index.html deleted file mode 100644 index 5230c38fe..000000000 --- a/owl-base/Owl_types_ndarray_mutable/module-type-Sig/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_types_ndarray_mutable.Sig.Mat)

Module Sig.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
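A minimal sketch of the matrix helpers above, using a hypothetical narrowed signature and assuming the usual Owl meaning of ~k (it shifts the reference diagonal, so k = 1 keeps only entries strictly above the main diagonal).

module type MAT = sig
  type arr
  val eye  : int -> arr
  val triu : ?k:int -> arr -> arr
end

module Mat_demo (M : MAT) = struct
  let identity n = M.eye n

  (* Strictly upper-triangular part of [a], main diagonal zeroed. *)
  let strict_upper a = M.triu ~k:1 a
end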
\ No newline at end of file diff --git a/owl-base/Owl_types_ndarray_mutable/module-type-Sig/Scalar/index.html b/owl-base/Owl_types_ndarray_mutable/module-type-Sig/Scalar/index.html deleted file mode 100644 index a8093bbbd..000000000 --- a/owl-base/Owl_types_ndarray_mutable/module-type-Sig/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_types_ndarray_mutable.Sig.Scalar)

Module Sig.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
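In this Scalar sub-signature elt is abstract and no constants are provided, so derived helpers must be built purely from the listed operations. The sketch below (hypothetical narrowed signature, illustrative functor) combines a few of them without needing any numeric literal.

module type SCALAR = sig
  type elt
  val add  : elt -> elt -> elt
  val mul  : elt -> elt -> elt
  val sqr  : elt -> elt
  val sqrt : elt -> elt
end

module Scalar_demo (S : SCALAR) = struct
  (* Euclidean norm of a pair: sqrt (a^2 + b^2). *)
  let hypot a b = S.sqrt (S.add (S.sqr a) (S.sqr b))

  (* Geometric mean of two non-negative values: sqrt (a * b). *)
  let geo_mean a b = S.sqrt (S.mul a b)
end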
\ No newline at end of file diff --git a/owl-base/Owl_types_ndarray_mutable/module-type-Sig/index.html b/owl-base/Owl_types_ndarray_mutable/module-type-Sig/index.html deleted file mode 100644 index 2b8c5b971..000000000 --- a/owl-base/Owl_types_ndarray_mutable/module-type-Sig/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -Sig (owl-base.Owl_types_ndarray_mutable.Sig)

Module type Owl_types_ndarray_mutable.Sig

include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
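The trailing-underscore functions above are the mutable core of this signature. The sketch below is written against Owl_types_ndarray_mutable.Sig (owl-base must be available for the name to resolve) and assumes the usual Owl convention that omitting ?out makes the operation act in place on the first array argument; when ?out is given, the result lands in that buffer instead.

module Inplace_demo (N : Owl_types_ndarray_mutable.Sig) = struct
  (* y <- y + alpha * x, allocating one scratch buffer for alpha * x. *)
  let axpy alpha x y =
    let scaled = N.copy x in
    N.mul_scalar_ scaled (N.float_to_elt alpha);  (* scaled <- alpha * x *)
    N.add_ y scaled                               (* y      <- y + scaled *)

  (* Same update, but written into a caller-provided buffer. *)
  let axpy_into ~out alpha x y =
    N.mul_scalar_ ~out x (N.float_to_elt alpha);  (* out <- alpha * x *)
    N.add_ ~out out y                             (* out <- out + y   *)
end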
\ No newline at end of file diff --git a/owl-base/Owl_types_ndarray_numdiff/.dummy b/owl-base/Owl_types_ndarray_numdiff/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_types_ndarray_numdiff/module-type-Sig/index.html b/owl-base/Owl_types_ndarray_numdiff/module-type-Sig/index.html deleted file mode 100644 index 6241d62bf..000000000 --- a/owl-base/Owl_types_ndarray_numdiff/module-type-Sig/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -Sig (owl-base.Owl_types_ndarray_numdiff.Sig)

Module type Owl_types_ndarray_numdiff.Sig

include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val mapi : (int -> elt -> elt) -> arr -> arr
val (+) : arr -> arr -> arr
val (-) : arr -> arr -> arr
val (*) : arr -> arr -> arr
val (/) : arr -> arr -> arr
val (+$) : arr -> elt -> arr
val (-$) : arr -> elt -> arr
val (*$) : arr -> elt -> arr
val (/$) : arr -> elt -> arr
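This signature adds get/set, copy, and the elt/float conversions on top of the basic ndarray operations, which is exactly what a numerical-differentiation routine needs. A sketch against Owl_types_ndarray_numdiff.Sig: a central-difference estimate of one partial derivative of a scalar-valued function, using only functions listed above.

module Numdiff_demo (N : Owl_types_ndarray_numdiff.Sig) = struct
  (* d f / d x_idx at the point [x], with step size [h]. *)
  let partial ?(h = 1e-5) (f : N.arr -> N.elt) x idx =
    let xp = N.copy x in
    let xm = N.copy x in
    let xi = N.elt_to_float (N.get x idx) in
    N.set xp idx (N.float_to_elt (xi +. h));
    N.set xm idx (N.float_to_elt (xi -. h));
    (N.elt_to_float (f xp) -. N.elt_to_float (f xm)) /. (2. *. h)
end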
\ No newline at end of file diff --git a/owl-base/Owl_types_operator/.dummy b/owl-base/Owl_types_operator/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_types_operator/module-type-BasicSig/index.html b/owl-base/Owl_types_operator/module-type-BasicSig/index.html deleted file mode 100644 index d7928b893..000000000 --- a/owl-base/Owl_types_operator/module-type-BasicSig/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -BasicSig (owl-base.Owl_types_operator.BasicSig)

Module type Owl_types_operator.BasicSig

type ('a, 'b) t
val add : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
val sub : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
val mul : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
val div : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
val add_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t
val sub_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t
val mul_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t
val div_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t
val scalar_add : 'a -> ('a, 'b) t -> ('a, 'b) t
val scalar_sub : 'a -> ('a, 'b) t -> ('a, 'b) t
val scalar_mul : 'a -> ('a, 'b) t -> ('a, 'b) t
val scalar_div : 'a -> ('a, 'b) t -> ('a, 'b) t
val equal : ('a, 'b) t -> ('a, 'b) t -> bool
val not_equal : ('a, 'b) t -> ('a, 'b) t -> bool
val greater : ('a, 'b) t -> ('a, 'b) t -> bool
val less : ('a, 'b) t -> ('a, 'b) t -> bool
val greater_equal : ('a, 'b) t -> ('a, 'b) t -> bool
val less_equal : ('a, 'b) t -> ('a, 'b) t -> bool
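Illustrative only: one common way a signature like BasicSig gets consumed is as the parameter of a functor that derives infix operators from the named functions. The functor below sketches that pattern; it is not the library's own operator module, and the operator spellings are assumptions.

module Make_infix (M : Owl_types_operator.BasicSig) = struct
  let ( + )  = M.add
  let ( - )  = M.sub
  let ( * )  = M.mul
  let ( / )  = M.div
  let ( +$ ) = M.add_scalar    (* array on the left, scalar on the right *)
  let ( $+ ) = M.scalar_add    (* scalar on the left, array on the right *)
  let ( = )  = M.equal
  let ( < )  = M.less
end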
\ No newline at end of file diff --git a/owl-base/Owl_types_operator/module-type-ExtendSig/index.html b/owl-base/Owl_types_operator/module-type-ExtendSig/index.html deleted file mode 100644 index be0c4f6a5..000000000 --- a/owl-base/Owl_types_operator/module-type-ExtendSig/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -ExtendSig (owl-base.Owl_types_operator.ExtendSig)

Module type Owl_types_operator.ExtendSig

type ('a, 'b) t
val equal_scalar : ('a, 'b) t -> 'a -> bool
val not_equal_scalar : ('a, 'b) t -> 'a -> bool
val less_scalar : ('a, 'b) t -> 'a -> bool
val greater_scalar : ('a, 'b) t -> 'a -> bool
val less_equal_scalar : ('a, 'b) t -> 'a -> bool
val greater_equal_scalar : ('a, 'b) t -> 'a -> bool
val elt_equal : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
val elt_not_equal : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
val elt_less : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
val elt_greater : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
val elt_less_equal : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
val elt_greater_equal : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
val elt_equal_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t
val elt_not_equal_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t
val elt_less_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t
val elt_greater_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t
val elt_less_equal_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t
val elt_greater_equal_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t
val fmod : (float, 'a) t -> (float, 'a) t -> (float, 'a) t
val fmod_scalar : (float, 'a) t -> float -> (float, 'a) t
val pow : (float, 'a) t -> (float, 'a) t -> (float, 'a) t
val scalar_pow : float -> (float, 'a) t -> (float, 'a) t
val pow_scalar : (float, 'a) t -> float -> (float, 'a) t
val approx_equal : ?eps:float -> ('a, 'b) t -> ('a, 'b) t -> bool
val approx_equal_scalar : ?eps:float -> ('a, 'b) t -> 'a -> bool
val approx_elt_equal : ?eps:float -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
val approx_elt_equal_scalar : ?eps:float -> ('a, 'b) t -> 'a -> ('a, 'b) t
val add_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit
val sub_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit
val mul_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit
val div_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit
val add_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit
val sub_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit
val mul_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit
val div_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit
val concat_vertical : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
val concat_horizontal : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
val get_fancy_ext : Owl_types.index array -> ('a, 'b) t -> ('a, 'b) t
val set_fancy_ext : Owl_types.index array -> ('a, 'b) t -> ('a, 'b) t -> unit
val get_slice_ext : int list array -> ('a, 'b) t -> ('a, 'b) t
val set_slice_ext : int list array -> ('a, 'b) t -> ('a, 'b) t -> unit
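A small sketch against Owl_types_operator.ExtendSig, combining two of the extended operations listed above: stack two blocks vertically, then produce an element-wise 0/1 agreement mask against a reference, within an optional tolerance.

module Ext_demo (E : Owl_types_operator.ExtendSig) = struct
  let stacked_agreement ?eps upper lower reference =
    let stacked = E.concat_vertical upper lower in
    E.approx_elt_equal ?eps stacked reference
end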
\ No newline at end of file diff --git a/owl-base/Owl_types_operator/module-type-LinalgSig/index.html b/owl-base/Owl_types_operator/module-type-LinalgSig/index.html deleted file mode 100644 index 875946f31..000000000 --- a/owl-base/Owl_types_operator/module-type-LinalgSig/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -LinalgSig (owl-base.Owl_types_operator.LinalgSig)

Module type Owl_types_operator.LinalgSig

type ('a, 'b) t
val mpow : ('a, 'b) t -> float -> ('a, 'b) t
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
\ No newline at end of file diff --git a/owl-base/Owl_types_operator/module-type-MatrixSig/index.html b/owl-base/Owl_types_operator/module-type-MatrixSig/index.html deleted file mode 100644 index 48c0b6e5d..000000000 --- a/owl-base/Owl_types_operator/module-type-MatrixSig/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -MatrixSig (owl-base.Owl_types_operator.MatrixSig)

Module type Owl_types_operator.MatrixSig

type ('a, 'b) t
val get : ('a, 'b) t -> int -> int -> 'a
val set : ('a, 'b) t -> int -> int -> 'a -> unit
val dot : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t
\ No newline at end of file diff --git a/owl-base/Owl_types_operator/module-type-NdarraySig/index.html b/owl-base/Owl_types_operator/module-type-NdarraySig/index.html deleted file mode 100644 index 84fa1a51e..000000000 --- a/owl-base/Owl_types_operator/module-type-NdarraySig/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -NdarraySig (owl-base.Owl_types_operator.NdarraySig)

Module type Owl_types_operator.NdarraySig

type ('a, 'b) t
val get : ('a, 'b) t -> int array -> 'a
val set : ('a, 'b) t -> int array -> 'a -> unit
\ No newline at end of file diff --git a/owl-base/Owl_types_stats_basic/.dummy b/owl-base/Owl_types_stats_basic/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_types_stats_dist/.dummy b/owl-base/Owl_types_stats_dist/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_types_stats_dist/module-type-Sig/Linalg/index.html b/owl-base/Owl_types_stats_dist/module-type-Sig/Linalg/index.html deleted file mode 100644 index 3b59c5393..000000000 --- a/owl-base/Owl_types_stats_dist/module-type-Sig/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl-base.Owl_types_stats_dist.Sig.Linalg)

Module Sig.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_types_stats_dist/module-type-Sig/Mat/index.html b/owl-base/Owl_types_stats_dist/module-type-Sig/Mat/index.html deleted file mode 100644 index 5136a8dd1..000000000 --- a/owl-base/Owl_types_stats_dist/module-type-Sig/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl-base.Owl_types_stats_dist.Sig.Mat)

Module Sig.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl-base/Owl_types_stats_dist/module-type-Sig/Scalar/index.html b/owl-base/Owl_types_stats_dist/module-type-Sig/Scalar/index.html deleted file mode 100644 index 43de7e952..000000000 --- a/owl-base/Owl_types_stats_dist/module-type-Sig/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl-base.Owl_types_stats_dist.Sig.Scalar)

Module Sig.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl-base/Owl_types_stats_dist/module-type-Sig/index.html b/owl-base/Owl_types_stats_dist/module-type-Sig/index.html deleted file mode 100644 index 9f46b9bdc..000000000 --- a/owl-base/Owl_types_stats_dist/module-type-Sig/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -Sig (owl-base.Owl_types_stats_dist.Sig)

Module type Owl_types_stats_dist.Sig

include Owl_types_ndarray_mutable.Sig
include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
val uniform_rvs : a:arr -> b:arr -> n:int -> arr
val uniform_pdf : a:arr -> b:arr -> arr -> arr
val uniform_logpdf : a:arr -> b:arr -> arr -> arr
val uniform_cdf : a:arr -> b:arr -> arr -> arr
val uniform_logcdf : a:arr -> b:arr -> arr -> arr
val uniform_ppf : a:arr -> b:arr -> arr -> arr
val uniform_sf : a:arr -> b:arr -> arr -> arr
val uniform_logsf : a:arr -> b:arr -> arr -> arr
val uniform_isf : a:arr -> b:arr -> arr -> arr
val gaussian_rvs : mu:arr -> sigma:arr -> n:int -> arr
val gaussian_pdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_logpdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_cdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_logcdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_ppf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_sf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_logsf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_isf : mu:arr -> sigma:arr -> arr -> arr
val exponential_rvs : lambda:arr -> n:int -> arr
val exponential_pdf : lambda:arr -> arr -> arr
val exponential_logpdf : lambda:arr -> arr -> arr
val exponential_cdf : lambda:arr -> arr -> arr
val exponential_logcdf : lambda:arr -> arr -> arr
val exponential_ppf : lambda:arr -> arr -> arr
val exponential_sf : lambda:arr -> arr -> arr
val exponential_logsf : lambda:arr -> arr -> arr
val exponential_isf : lambda:arr -> arr -> arr
val poisson_rvs : mu:arr -> n:int -> arr
val gamma_rvs : shape:arr -> scale:arr -> n:int -> arr
val gamma_pdf : shape:arr -> scale:arr -> arr -> arr
val gamma_logpdf : shape:arr -> scale:arr -> arr -> arr
val gamma_cdf : shape:arr -> scale:arr -> arr -> arr
val gamma_logcdf : shape:arr -> scale:arr -> arr -> arr
val gamma_ppf : shape:arr -> scale:arr -> arr -> arr
val gamma_sf : shape:arr -> scale:arr -> arr -> arr
val gamma_logsf : shape:arr -> scale:arr -> arr -> arr
val gamma_isf : shape:arr -> scale:arr -> arr -> arr
val beta_rvs : a:arr -> b:arr -> n:int -> arr
val beta_pdf : a:arr -> b:arr -> arr -> arr
val beta_logpdf : a:arr -> b:arr -> arr -> arr
val beta_cdf : a:arr -> b:arr -> arr -> arr
val beta_logcdf : a:arr -> b:arr -> arr -> arr
val beta_ppf : a:arr -> b:arr -> arr -> arr
val beta_sf : a:arr -> b:arr -> arr -> arr
val beta_logsf : a:arr -> b:arr -> arr -> arr
val beta_isf : a:arr -> b:arr -> arr -> arr
val chi2_rvs : df:arr -> n:int -> arr
val chi2_pdf : df:arr -> arr -> arr
val chi2_logpdf : df:arr -> arr -> arr
val chi2_cdf : df:arr -> arr -> arr
val chi2_logcdf : df:arr -> arr -> arr
val chi2_ppf : df:arr -> arr -> arr
val chi2_sf : df:arr -> arr -> arr
val chi2_logsf : df:arr -> arr -> arr
val chi2_isf : df:arr -> arr -> arr
val f_rvs : dfnum:arr -> dfden:arr -> n:int -> arr
val f_pdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_logpdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_cdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_logcdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_ppf : dfnum:arr -> dfden:arr -> arr -> arr
val f_sf : dfnum:arr -> dfden:arr -> arr -> arr
val f_logsf : dfnum:arr -> dfden:arr -> arr -> arr
val f_isf : dfnum:arr -> dfden:arr -> arr -> arr
val cauchy_rvs : loc:arr -> scale:arr -> n:int -> arr
val cauchy_pdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_logpdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_cdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_logcdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_ppf : loc:arr -> scale:arr -> arr -> arr
val cauchy_sf : loc:arr -> scale:arr -> arr -> arr
val cauchy_logsf : loc:arr -> scale:arr -> arr -> arr
val cauchy_isf : loc:arr -> scale:arr -> arr -> arr
val lomax_rvs : shape:arr -> scale:arr -> n:int -> arr
val lomax_pdf : shape:arr -> scale:arr -> arr -> arr
val lomax_logpdf : shape:arr -> scale:arr -> arr -> arr
val lomax_cdf : shape:arr -> scale:arr -> arr -> arr
val lomax_logcdf : shape:arr -> scale:arr -> arr -> arr
val lomax_ppf : shape:arr -> scale:arr -> arr -> arr
val lomax_sf : shape:arr -> scale:arr -> arr -> arr
val lomax_logsf : shape:arr -> scale:arr -> arr -> arr
val lomax_isf : shape:arr -> scale:arr -> arr -> arr
val weibull_rvs : shape:arr -> scale:arr -> n:int -> arr
val weibull_pdf : shape:arr -> scale:arr -> arr -> arr
val weibull_logpdf : shape:arr -> scale:arr -> arr -> arr
val weibull_cdf : shape:arr -> scale:arr -> arr -> arr
val weibull_logcdf : shape:arr -> scale:arr -> arr -> arr
val weibull_ppf : shape:arr -> scale:arr -> arr -> arr
val weibull_sf : shape:arr -> scale:arr -> arr -> arr
val weibull_logsf : shape:arr -> scale:arr -> arr -> arr
val weibull_isf : shape:arr -> scale:arr -> arr -> arr
val laplace_rvs : loc:arr -> scale:arr -> n:int -> arr
val laplace_pdf : loc:arr -> scale:arr -> arr -> arr
val laplace_logpdf : loc:arr -> scale:arr -> arr -> arr
val laplace_cdf : loc:arr -> scale:arr -> arr -> arr
val laplace_logcdf : loc:arr -> scale:arr -> arr -> arr
val laplace_ppf : loc:arr -> scale:arr -> arr -> arr
val laplace_sf : loc:arr -> scale:arr -> arr -> arr
val laplace_logsf : loc:arr -> scale:arr -> arr -> arr
val laplace_isf : loc:arr -> scale:arr -> arr -> arr
val gumbel1_rvs : a:arr -> b:arr -> n:int -> arr
val gumbel1_pdf : a:arr -> b:arr -> arr -> arr
val gumbel1_logpdf : a:arr -> b:arr -> arr -> arr
val gumbel1_cdf : a:arr -> b:arr -> arr -> arr
val gumbel1_logcdf : a:arr -> b:arr -> arr -> arr
val gumbel1_ppf : a:arr -> b:arr -> arr -> arr
val gumbel1_sf : a:arr -> b:arr -> arr -> arr
val gumbel1_logsf : a:arr -> b:arr -> arr -> arr
val gumbel1_isf : a:arr -> b:arr -> arr -> arr
val gumbel2_rvs : a:arr -> b:arr -> n:int -> arr
val gumbel2_pdf : a:arr -> b:arr -> arr -> arr
val gumbel2_logpdf : a:arr -> b:arr -> arr -> arr
val gumbel2_cdf : a:arr -> b:arr -> arr -> arr
val gumbel2_logcdf : a:arr -> b:arr -> arr -> arr
val gumbel2_ppf : a:arr -> b:arr -> arr -> arr
val gumbel2_sf : a:arr -> b:arr -> arr -> arr
val gumbel2_logsf : a:arr -> b:arr -> arr -> arr
val gumbel2_isf : a:arr -> b:arr -> arr -> arr
val logistic_rvs : loc:arr -> scale:arr -> n:int -> arr
val logistic_pdf : loc:arr -> scale:arr -> arr -> arr
val logistic_logpdf : loc:arr -> scale:arr -> arr -> arr
val logistic_cdf : loc:arr -> scale:arr -> arr -> arr
val logistic_logcdf : loc:arr -> scale:arr -> arr -> arr
val logistic_ppf : loc:arr -> scale:arr -> arr -> arr
val logistic_sf : loc:arr -> scale:arr -> arr -> arr
val logistic_logsf : loc:arr -> scale:arr -> arr -> arr
val logistic_isf : loc:arr -> scale:arr -> arr -> arr
val lognormal_rvs : mu:arr -> sigma:arr -> n:int -> arr
val lognormal_pdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_logpdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_cdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_logcdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_ppf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_sf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_logsf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_isf : mu:arr -> sigma:arr -> arr -> arr
val rayleigh_rvs : sigma:arr -> n:int -> arr
val rayleigh_pdf : sigma:arr -> arr -> arr
val rayleigh_logpdf : sigma:arr -> arr -> arr
val rayleigh_cdf : sigma:arr -> arr -> arr
val rayleigh_logcdf : sigma:arr -> arr -> arr
val rayleigh_ppf : sigma:arr -> arr -> arr
val rayleigh_sf : sigma:arr -> arr -> arr
val rayleigh_logsf : sigma:arr -> arr -> arr
val rayleigh_isf : sigma:arr -> arr -> arr
\ No newline at end of file diff --git a/owl-base/Owl_utils/.dummy b/owl-base/Owl_utils/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_utils_array/.dummy b/owl-base/Owl_utils_array/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_utils_heap/.dummy b/owl-base/Owl_utils_heap/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_utils_infer_shape/.dummy b/owl-base/Owl_utils_infer_shape/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_utils_multimap/.dummy b/owl-base/Owl_utils_multimap/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_utils_multimap/Make/index.html b/owl-base/Owl_utils_multimap/Make/index.html deleted file mode 100644 index a8015b2eb..000000000 --- a/owl-base/Owl_utils_multimap/Make/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Make (owl-base.Owl_utils_multimap.Make)

Module Owl_utils_multimap.Make

Parameters

module Ord : Stdlib.Map.OrderedType

Signature

Type definition
type key = Ord.t

Type of the multimap keys.

type 'a t

Type of a multimap.

Basic functions
val empty : 'a t

The empty multimap.

val is_empty : 'a t -> bool

Check whether the multimap is empty.

val mem : key -> 'a t -> bool

mem k m returns true if the multimap m contains at least one binding for k, and false otherwise.

val add : key -> 'a -> 'a t -> 'a t

add k v m returns a multimap containing the same bindings as m, plus a binding from k to v. Previous bindings for k are hidden by the new binding (they can be restored by calling remove k m).

val remove : key -> 'a t -> 'a t

remove k m returns a multimap with the same bindings as m, except for the binding of k: the last value that was bound to it is removed. If there is no binding for k in m, it raises Not_found.

val find : key -> 'a t -> 'a

find k m returns the last added binding of k in m, or raises Not_found if there is no such binding.

val max_binding : 'a t -> key * 'a

max_binding m returns the greatest binding in m. Raises Not_found if m is empty.

val find_first_opt : (key -> bool) -> 'a t -> (key * 'a) option

find_first_opt f m returns the first binding (k, v) such that f k, or None if no such binding exists. The function f has to be nondecreasing. Time complexity is O(log n).
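A small sketch of these semantics (it assumes the owl-base package exposes Owl_utils_multimap as documented on this page; Stdlib.Int is used here as the ordered key module):

module M = Owl_utils_multimap.Make (Stdlib.Int)

let () =
  let m = M.empty |> M.add 1 "a" |> M.add 1 "b" in
  assert (M.find 1 m = "b");   (* find returns the last added binding for the key *)
  let m = M.remove 1 m in
  assert (M.find 1 m = "a");   (* remove drops that binding and restores the previous one *)
  assert (M.mem 1 m)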

\ No newline at end of file diff --git a/owl-base/Owl_utils_ndarray/.dummy b/owl-base/Owl_utils_ndarray/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_utils_stack/.dummy b/owl-base/Owl_utils_stack/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_view/.dummy b/owl-base/Owl_view/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl-base/Owl_view/Make/argument-1-A/index.html b/owl-base/Owl_view/Make/argument-1-A/index.html deleted file mode 100644 index 220c11810..000000000 --- a/owl-base/Owl_view/Make/argument-1-A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl-base.Owl_view.Make.A)

Parameter Make.A

include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
\ No newline at end of file diff --git a/owl-base/Owl_view/Make/index.html b/owl-base/Owl_view/Make/index.html deleted file mode 100644 index f2aca2b4c..000000000 --- a/owl-base/Owl_view/Make/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Make (owl-base.Owl_view.Make)

Module Owl_view.Make

Parameters

Signature

Type definition
type t

t is the abstract type representing a view atop an ndarray.

Conversion functions
val of_arr : A.arr -> t

of_arr x creates a view from ndarray x.

val to_arr : t -> A.arr

to_arr x creates a new ndarray based on the view x.
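A minimal usage sketch (it assumes the full owl package is installed and that Owl.Dense.Ndarray.D satisfies this functor's parameter signature; the bindings below are illustrative only):

module V = Owl_view.Make (Owl.Dense.Ndarray.D)

let x = Owl.Dense.Ndarray.D.sequential [| 3; 4 |]
let v = V.of_arr x                  (* a view over the data of x *)
let () = V.set v [| 0; 0 |] 100.    (* update one element through the view *)
let y = V.to_arr v                  (* materialise the view as a new ndarray *)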

Manipulation functions
val get : t -> int array -> A.elt

Refer to :doc:`owl_dense_ndarray_generic`

val set : t -> int array -> A.elt -> unit

Refer to :doc:`owl_dense_ndarray_generic`

val get_slice : int list list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val set_slice : int list list -> t -> t -> unit

Refer to :doc:`owl_dense_ndarray_generic`

val shape : t -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val num_dims : t -> int

Refer to :doc:`owl_dense_ndarray_generic`

val nth_dim : t -> int -> int

Refer to :doc:`owl_dense_ndarray_generic`

val numel : t -> int

Refer to :doc:`owl_dense_ndarray_generic`

Iteration functions
val iteri : (int -> A.elt -> unit) -> t -> unit

iteri f x iterates over x and applies f to every element. f has type int -> elt -> unit, where the first parameter is the index; 1d indices are passed to the user function.

val iter : (A.elt -> unit) -> t -> unit

Similar to iteri, but the index is not passed in.

val mapi : (int -> A.elt -> A.elt) -> t -> unit

mapi f x applies f : int -> elt -> elt to every element in x, then saves the result in place. 1d indices are passed to the user function.

val map : (A.elt -> A.elt) -> t -> unit

map f x applies f : elt -> elt to every element in x, then saves the result in place in x.

val iter2 : (A.elt -> A.elt -> unit) -> t -> t -> unit

iter2 f x y applies f : elt -> elt -> unit to every pair of elements in x and y. The indices are not passed to the user function.

val map2 : (A.elt -> A.elt -> A.elt) -> t -> t -> unit

map2 f x y applies f : elt -> elt -> elt to every pair of elements in x and y, then saves the result in y. Be careful with the argument order: it matters, because the data reflected by view y will be modified.
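Continuing the illustrative sketch above (module V built over Owl.Dense.Ndarray.D), the in-place behaviour of map and map2 looks as follows; note that map2 writes its result into the second view:

let () =
  V.map (fun a -> a *. 2.) v;                              (* every element of v is doubled in place *)
  let w = V.of_arr (Owl.Dense.Ndarray.D.ones [| 3; 4 |]) in
  V.map2 (fun a b -> a +. b) v w                           (* the sums are stored in w; v is left unchanged *)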

val iteri_nd : (int array -> A.elt -> unit) -> t -> unit

Similar to `iteri` but n-d indices are passed in. This function is much slower than `iteri`.

val mapi_nd : (int array -> A.elt -> A.elt) -> t -> unit

Similar to `mapi` but n-d indices are passed in. This function is much slower than `mapi`.

Examination & Comparison
val exists : (A.elt -> bool) -> t -> bool

exists f x checks all the elements in x using f. If at least one element satisfies f then the function returns true, otherwise it returns false.

val not_exists : (A.elt -> bool) -> t -> bool

not_exists f x checks all the elements in x; the function returns true only if all the elements fail to satisfy f.

val for_all : (A.elt -> bool) -> t -> bool

for_all f x checks all the elements in x; the function returns true if and only if all the elements pass the check of function f.

val equal : t -> t -> bool

equal x y returns true if x and y are elementwise equal.

val not_equal : t -> t -> bool

not_equal x y returns true if x and y are not elementwise equal.
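A short continuation of the same illustrative sketch, exercising the predicates above:

let all_pos = V.for_all (fun a -> a > 0.) v    (* true iff every element satisfies the predicate *)
let any_big = V.exists (fun a -> a > 100.) v   (* true iff at least one element satisfies it *)
let same    = V.equal v v                      (* elementwise equality of two views *)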

\ No newline at end of file diff --git a/owl-top/Owl_top/.dummy b/owl-top/Owl_top/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl/.dummy b/owl/Owl/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl/Arr/index.html b/owl/Owl/Arr/index.html deleted file mode 100644 index 06605de1b..000000000 --- a/owl/Owl/Arr/index.html +++ /dev/null @@ -1,579 +0,0 @@ - -Arr (owl.Owl.Arr)

Module Owl.Arr

include module type of struct include Owl_dense.Ndarray.D end
include module type of struct include Owl_dense_ndarray_d end
type elt = float
type arr = (float, Stdlib.Bigarray.float64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
include Owl_dense_ndarray_intf.Common with type elt := elt and type arr := arr
include Owl_base_dense_ndarray_intf.Common with type elt := elt with type arr := arr
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:float -> int array -> arr
val shape : arr -> int array
val numel : arr -> int
val strides : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val slice_size : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val flatten : arr -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val squeeze : ?axis:int array -> arr -> arr
val expand : ?hi:bool -> arr -> int -> arr
val split : ?axis:int -> int array -> arr -> arr array
val draw : ?axis:int -> arr -> int -> arr * int array
val pad : ?v:elt -> int list list -> arr -> arr
val one_hot : int -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
Iterate array elements
val iteri : (int -> elt -> unit) -> arr -> unit
val iter : (elt -> unit) -> arr -> unit
val mapi : (int -> elt -> elt) -> arr -> arr
val map : (elt -> elt) -> arr -> arr
val filteri : (int -> elt -> bool) -> arr -> int array
val filter : (elt -> bool) -> arr -> int array
val foldi : ?axis:int -> (int -> elt -> elt -> elt) -> elt -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scani : ?axis:int -> (int -> elt -> elt -> elt) -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
Examination & Comparison
val exists : (elt -> bool) -> arr -> bool
val not_exists : (elt -> bool) -> arr -> bool
val for_all : (elt -> bool) -> arr -> bool
val is_zero : arr -> bool
val is_positive : arr -> bool
val is_negative : arr -> bool
val is_nonpositive : arr -> bool
val is_nonnegative : arr -> bool
val is_normal : arr -> bool
val not_nan : arr -> bool
val not_inf : arr -> bool
val equal : arr -> arr -> bool
val not_equal : arr -> arr -> bool
val greater : arr -> arr -> bool
val less : arr -> arr -> bool
val greater_equal : arr -> arr -> bool
val less_equal : arr -> arr -> bool
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val equal_scalar : arr -> elt -> bool
val not_equal_scalar : arr -> elt -> bool
val less_scalar : arr -> elt -> bool
val greater_scalar : arr -> elt -> bool
val less_equal_scalar : arr -> elt -> bool
val greater_equal_scalar : arr -> elt -> bool
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val diag : ?k:int -> arr -> arr
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
Create N-dimensional array
val linspace : elt -> elt -> int -> arr

linspace k 0. 9. 10 ...

val logspace : ?base:float -> elt -> elt -> int -> arr

logspace k 0. 9. 10 ...

val unit_basis : int -> int -> arr

unit_basis k n i returns a unit basis vector with ith element set to 1.
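A brief usage sketch for these constructors (the exact spacing produced by logspace relative to its ?base argument is an assumption here):

let xs = Owl.Arr.linspace 0. 9. 10            (* 10 evenly spaced values from 0. to 9. *)
let ys = Owl.Arr.logspace ~base:10. 0. 3. 4   (* assumed: 4 values spaced logarithmically between 10^0 and 10^3 *)
let e2 = Owl.Arr.unit_basis 5 2               (* length-5 unit basis vector with element 2 set to 1. *)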

Obtain basic properties
val num_dims : arr -> int
val nth_dim : arr -> int -> int
val nnz : arr -> int
val density : arr -> float
val size_in_bytes : arr -> int
val same_shape : arr -> arr -> bool
val same_data : arr -> arr -> bool
val ind : arr -> int -> int array
val i1d : arr -> int array -> int
Manipulate a N-dimensional array
val get_index : arr -> int array array -> elt array
val set_index : arr -> int array array -> elt array -> unit
val get_fancy : Owl_types.index list -> arr -> arr
val set_fancy : Owl_types.index list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val sub_ndarray : int array -> arr -> arr array
val slice_left : arr -> int array -> arr
val fill : arr -> elt -> unit
val resize : ?head:bool -> arr -> int array -> arr
val flip : ?axis:int -> arr -> arr
val rotate : arr -> int -> arr
val swap : int -> int -> arr -> arr
val concat_vertical : arr -> arr -> arr
val concat_horizontal : arr -> arr -> arr
val concat_vh : arr array array -> arr
val split_vh : (int * int) array array -> arr -> arr array array
val dropout : ?rate:float -> arr -> arr
val top : arr -> int -> int array array
val bottom : arr -> int -> int array array
val sort : arr -> arr
val sort1 : ?axis:int -> arr -> arr
val argsort : arr -> (int64, Stdlib.Bigarray.int64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
val mmap : Unix.file_descr -> ?pos:int64 -> bool -> int array -> arr
Iterate array elements
val iter2i : (int -> elt -> elt -> unit) -> arr -> arr -> unit
val iter2 : (elt -> elt -> unit) -> arr -> arr -> unit
val map2i : (int -> elt -> elt -> elt) -> arr -> arr -> arr
val map2 : (elt -> elt -> elt) -> arr -> arr -> arr
val iteri_nd : (int array -> elt -> unit) -> arr -> unit
val mapi_nd : (int array -> elt -> elt) -> arr -> arr
val foldi_nd : ?axis:int -> (int array -> elt -> elt -> elt) -> elt -> arr -> arr
val scani_nd : ?axis:int -> (int array -> elt -> elt -> elt) -> arr -> arr
val filteri_nd : (int array -> elt -> bool) -> arr -> int array array
val iter2i_nd : (int array -> elt -> elt -> unit) -> arr -> arr -> unit
val map2i_nd : (int array -> elt -> elt -> elt) -> arr -> arr -> arr
val iteri_slice : ?axis:int -> (int -> arr -> unit) -> arr -> unit
val iter_slice : ?axis:int -> (arr -> unit) -> arr -> unit
val mapi_slice : ?axis:int -> (int -> arr -> 'c) -> arr -> 'c array
val map_slice : ?axis:int -> (arr -> 'c) -> arr -> 'c array
val filteri_slice : ?axis:int -> (int -> arr -> bool) -> arr -> arr array
val filter_slice : ?axis:int -> (arr -> bool) -> arr -> arr array
val foldi_slice : ?axis:int -> (int -> 'c -> arr -> 'c) -> 'c -> arr -> 'c
val fold_slice : ?axis:int -> ('c -> arr -> 'c) -> 'c -> arr -> 'c
Examine array elements or compare two arrays
Input/Output functions
val to_array : arr -> elt array
val save : out:string -> arr -> unit
val load : string -> arr
val save_npy : out:string -> arr -> unit
val load_npy : string -> arr
Unary mathematical operations
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod' : arr -> elt
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean' : arr -> elt
val median' : arr -> elt
val median : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var' : arr -> elt
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std' : arr -> elt
val sem : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sem' : arr -> elt
val minmax : ?axis:int -> ?keep_dims:bool -> arr -> arr * arr
val minmax' : arr -> elt * elt
val min_i : arr -> elt * int array
val max_i : arr -> elt * int array
val minmax_i : arr -> (elt * int array) * (elt * int array)
val abs2 : arr -> arr
val conj : arr -> arr
val reci : arr -> arr
val reci_tol : ?tol:elt -> arr -> arr
val cbrt : arr -> arr
val exp2 : arr -> arr
val exp10 : arr -> arr
val expm1 : arr -> arr
val log1p : arr -> arr
val trunc : arr -> arr
val fix : arr -> arr
val modf : arr -> arr * arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm_sqr : ?axis:int -> ?keep_dims:bool -> arr -> arr
val vecnorm : ?axis:int -> ?p:float -> ?keep_dims:bool -> arr -> arr
val vecnorm' : ?p:float -> arr -> elt
val cumsum : ?axis:int -> arr -> arr
val cumprod : ?axis:int -> arr -> arr
val cummin : ?axis:int -> arr -> arr
val cummax : ?axis:int -> arr -> arr
val diff : ?axis:int -> ?n:int -> arr -> arr
val lgamma : arr -> arr
Binary mathematical operations
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val ssqr' : arr -> elt -> elt
val ssqr_diff' : arr -> arr -> elt
Tensor Calculus
val contract1 : (int * int) array -> arr -> arr
val contract2 : (int * int) array -> arr -> arr -> arr
Experimental functions
val slide : ?axis:int -> ?ofs:int -> ?step:int -> window:int -> arr -> arr
Functions of in-place modification
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:float -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val sort_ : arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_fancy_ : out:arr -> Owl_types.index list -> arr -> unit
val set_fancy_ : out:arr -> Owl_types.index list -> arr -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit

Matrix functions

val col : arr -> int -> arr
val cols : arr -> int array -> arr
val to_arrays : arr -> elt array array
val draw_rows : ?replacement:bool -> arr -> int -> arr * int array
val draw_cols : ?replacement:bool -> arr -> int -> arr * int array
val draw_rows2 : ?replacement:bool -> arr -> arr -> int -> arr * arr * int array
val draw_cols2 : ?replacement:bool -> arr -> arr -> int -> arr * arr * int array
include Owl_dense_ndarray_intf.Real with type elt := elt and type arr := arr
include Owl_base_dense_ndarray_intf.Real with type elt := elt with type arr := arr
val sum_slices : ?axis:int -> arr -> arr
val signum : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val atan2 : arr -> arr -> arr
val approx_equal : ?eps:float -> arr -> arr -> bool
val approx_equal_scalar : ?eps:float -> arr -> float -> bool
val approx_elt_equal : ?eps:float -> arr -> arr -> arr
val approx_elt_equal_scalar : ?eps:float -> arr -> float -> arr
val dot : arr -> arr -> arr
val trace : arr -> elt
Helper functions
val float_to_elt : float -> elt
val elt_to_float : elt -> float
Real operations
val i0 : arr -> arr
val i0e : arr -> arr
val i1 : arr -> arr
val i1e : arr -> arr
val iv : v:arr -> arr -> arr
val scalar_iv : v:elt -> arr -> arr
val iv_scalar : v:arr -> elt -> arr
val j0 : arr -> arr
val j1 : arr -> arr
val jv : v:arr -> arr -> arr
val scalar_jv : v:elt -> arr -> arr
val jv_scalar : v:arr -> elt -> arr
val erf : arr -> arr
val erfc : arr -> arr
val logistic : arr -> arr
val elu : ?alpha:elt -> arr -> arr
val leaky_relu : ?alpha:elt -> arr -> arr
val softplus : arr -> arr
val softsign : arr -> arr
val softmax : ?axis:int -> arr -> arr
val sigmoid : arr -> arr
val log_sum_exp' : arr -> float
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val fmod_scalar : arr -> elt -> arr
val scalar_fmod : elt -> arr -> arr
val cross_entropy' : arr -> arr -> float
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
val poisson : mu:elt -> int array -> arr
val poisson_ : mu:elt -> out:arr -> unit
include Owl_dense_ndarray_intf.NN with type arr := arr
include Owl_base_dense_ndarray_intf.NN with type arr := arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val max_pool2d_argmax : ?padding:Owl_types.padding -> arr -> int array -> int array -> arr * (int64, Stdlib.Bigarray.int64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
val conv1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
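The convolution and pooling signatures above all follow the same pattern: the input ndarray, the kernel (for convolutions), then one or two int array arguments for stride and, where present, dilation; the trailing-underscore variants write into ~out instead of allocating. As a rough, hedged sketch (assuming this module is Owl's double-precision ndarray, exposed as Owl.Arr, and its usual batch-height-width-channel layout — both of which are assumptions, not stated in this listing):
let () =
  let x = Owl.Arr.gaussian [| 8; 28; 28; 1 |] in   (* batch of 8 images, 28x28, 1 channel (NHWC assumed) *)
  let k = Owl.Arr.gaussian [| 3; 3; 1; 16 |] in    (* 3x3 kernel mapping 1 -> 16 channels *)
  let y = Owl.Arr.conv2d x k [| 1; 1 |] in         (* stride 1x1, default padding *)
  let z = Owl.Arr.max_pool2d y [| 2; 2 |] [| 2; 2 |] in  (* 2x2 window, stride 2 *)
  Array.iter (Printf.printf "%d ") (Owl.Arr.shape z)
The *_backward functions take the original inputs, the stride (and dilation), and the output gradient, and return the gradient with respect to the input or kernel; they are what the neural-network layers call during reverse-mode differentiation.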
include Owl_dense_ndarray_intf.Distribution with type arr := arr
Stats & distribution functions
val uniform_rvs : a:arr -> b:arr -> n:int -> arr
val uniform_pdf : a:arr -> b:arr -> arr -> arr
val uniform_logpdf : a:arr -> b:arr -> arr -> arr
val uniform_cdf : a:arr -> b:arr -> arr -> arr
val uniform_logcdf : a:arr -> b:arr -> arr -> arr
val uniform_ppf : a:arr -> b:arr -> arr -> arr
val uniform_sf : a:arr -> b:arr -> arr -> arr
val uniform_logsf : a:arr -> b:arr -> arr -> arr
val uniform_isf : a:arr -> b:arr -> arr -> arr
val gaussian_rvs : mu:arr -> sigma:arr -> n:int -> arr
val gaussian_pdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_logpdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_cdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_logcdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_ppf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_sf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_logsf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_isf : mu:arr -> sigma:arr -> arr -> arr
val exponential_rvs : lambda:arr -> n:int -> arr
val exponential_pdf : lambda:arr -> arr -> arr
val exponential_logpdf : lambda:arr -> arr -> arr
val exponential_cdf : lambda:arr -> arr -> arr
val exponential_logcdf : lambda:arr -> arr -> arr
val exponential_ppf : lambda:arr -> arr -> arr
val exponential_sf : lambda:arr -> arr -> arr
val exponential_logsf : lambda:arr -> arr -> arr
val exponential_isf : lambda:arr -> arr -> arr
val gamma_rvs : shape:arr -> scale:arr -> n:int -> arr
val gamma_pdf : shape:arr -> scale:arr -> arr -> arr
val gamma_logpdf : shape:arr -> scale:arr -> arr -> arr
val gamma_cdf : shape:arr -> scale:arr -> arr -> arr
val gamma_logcdf : shape:arr -> scale:arr -> arr -> arr
val gamma_ppf : shape:arr -> scale:arr -> arr -> arr
val gamma_sf : shape:arr -> scale:arr -> arr -> arr
val gamma_logsf : shape:arr -> scale:arr -> arr -> arr
val gamma_isf : shape:arr -> scale:arr -> arr -> arr
val beta_rvs : a:arr -> b:arr -> n:int -> arr
val beta_pdf : a:arr -> b:arr -> arr -> arr
val beta_logpdf : a:arr -> b:arr -> arr -> arr
val beta_cdf : a:arr -> b:arr -> arr -> arr
val beta_logcdf : a:arr -> b:arr -> arr -> arr
val beta_ppf : a:arr -> b:arr -> arr -> arr
val beta_sf : a:arr -> b:arr -> arr -> arr
val beta_logsf : a:arr -> b:arr -> arr -> arr
val beta_isf : a:arr -> b:arr -> arr -> arr
val chi2_rvs : df:arr -> n:int -> arr
val chi2_pdf : df:arr -> arr -> arr
val chi2_logpdf : df:arr -> arr -> arr
val chi2_cdf : df:arr -> arr -> arr
val chi2_logcdf : df:arr -> arr -> arr
val chi2_ppf : df:arr -> arr -> arr
val chi2_sf : df:arr -> arr -> arr
val chi2_logsf : df:arr -> arr -> arr
val chi2_isf : df:arr -> arr -> arr
val f_rvs : dfnum:arr -> dfden:arr -> n:int -> arr
val f_pdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_logpdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_cdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_logcdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_ppf : dfnum:arr -> dfden:arr -> arr -> arr
val f_sf : dfnum:arr -> dfden:arr -> arr -> arr
val f_logsf : dfnum:arr -> dfden:arr -> arr -> arr
val f_isf : dfnum:arr -> dfden:arr -> arr -> arr
val cauchy_rvs : loc:arr -> scale:arr -> n:int -> arr
val cauchy_pdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_logpdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_cdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_logcdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_ppf : loc:arr -> scale:arr -> arr -> arr
val cauchy_sf : loc:arr -> scale:arr -> arr -> arr
val cauchy_logsf : loc:arr -> scale:arr -> arr -> arr
val cauchy_isf : loc:arr -> scale:arr -> arr -> arr
val lomax_rvs : shape:arr -> scale:arr -> n:int -> arr
val lomax_pdf : shape:arr -> scale:arr -> arr -> arr
val lomax_logpdf : shape:arr -> scale:arr -> arr -> arr
val lomax_cdf : shape:arr -> scale:arr -> arr -> arr
val lomax_logcdf : shape:arr -> scale:arr -> arr -> arr
val lomax_ppf : shape:arr -> scale:arr -> arr -> arr
val lomax_sf : shape:arr -> scale:arr -> arr -> arr
val lomax_logsf : shape:arr -> scale:arr -> arr -> arr
val lomax_isf : shape:arr -> scale:arr -> arr -> arr
val weibull_rvs : shape:arr -> scale:arr -> n:int -> arr
val weibull_pdf : shape:arr -> scale:arr -> arr -> arr
val weibull_logpdf : shape:arr -> scale:arr -> arr -> arr
val weibull_cdf : shape:arr -> scale:arr -> arr -> arr
val weibull_logcdf : shape:arr -> scale:arr -> arr -> arr
val weibull_ppf : shape:arr -> scale:arr -> arr -> arr
val weibull_sf : shape:arr -> scale:arr -> arr -> arr
val weibull_logsf : shape:arr -> scale:arr -> arr -> arr
val weibull_isf : shape:arr -> scale:arr -> arr -> arr
val laplace_rvs : loc:arr -> scale:arr -> n:int -> arr
val laplace_pdf : loc:arr -> scale:arr -> arr -> arr
val laplace_logpdf : loc:arr -> scale:arr -> arr -> arr
val laplace_cdf : loc:arr -> scale:arr -> arr -> arr
val laplace_logcdf : loc:arr -> scale:arr -> arr -> arr
val laplace_ppf : loc:arr -> scale:arr -> arr -> arr
val laplace_sf : loc:arr -> scale:arr -> arr -> arr
val laplace_logsf : loc:arr -> scale:arr -> arr -> arr
val laplace_isf : loc:arr -> scale:arr -> arr -> arr
val gumbel1_rvs : a:arr -> b:arr -> n:int -> arr
val gumbel1_pdf : a:arr -> b:arr -> arr -> arr
val gumbel1_logpdf : a:arr -> b:arr -> arr -> arr
val gumbel1_cdf : a:arr -> b:arr -> arr -> arr
val gumbel1_logcdf : a:arr -> b:arr -> arr -> arr
val gumbel1_ppf : a:arr -> b:arr -> arr -> arr
val gumbel1_sf : a:arr -> b:arr -> arr -> arr
val gumbel1_logsf : a:arr -> b:arr -> arr -> arr
val gumbel1_isf : a:arr -> b:arr -> arr -> arr
val gumbel2_rvs : a:arr -> b:arr -> n:int -> arr
val gumbel2_pdf : a:arr -> b:arr -> arr -> arr
val gumbel2_logpdf : a:arr -> b:arr -> arr -> arr
val gumbel2_cdf : a:arr -> b:arr -> arr -> arr
val gumbel2_logcdf : a:arr -> b:arr -> arr -> arr
val gumbel2_ppf : a:arr -> b:arr -> arr -> arr
val gumbel2_sf : a:arr -> b:arr -> arr -> arr
val gumbel2_logsf : a:arr -> b:arr -> arr -> arr
val gumbel2_isf : a:arr -> b:arr -> arr -> arr
val logistic_rvs : loc:arr -> scale:arr -> n:int -> arr
val logistic_pdf : loc:arr -> scale:arr -> arr -> arr
val logistic_logpdf : loc:arr -> scale:arr -> arr -> arr
val logistic_cdf : loc:arr -> scale:arr -> arr -> arr
val logistic_logcdf : loc:arr -> scale:arr -> arr -> arr
val logistic_ppf : loc:arr -> scale:arr -> arr -> arr
val logistic_sf : loc:arr -> scale:arr -> arr -> arr
val logistic_logsf : loc:arr -> scale:arr -> arr -> arr
val logistic_isf : loc:arr -> scale:arr -> arr -> arr
val lognormal_rvs : mu:arr -> sigma:arr -> n:int -> arr
val lognormal_pdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_logpdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_cdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_logcdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_ppf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_sf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_logsf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_isf : mu:arr -> sigma:arr -> arr -> arr
val rayleigh_rvs : sigma:arr -> n:int -> arr
val rayleigh_pdf : sigma:arr -> arr -> arr
val rayleigh_logpdf : sigma:arr -> arr -> arr
val rayleigh_cdf : sigma:arr -> arr -> arr
val rayleigh_logcdf : sigma:arr -> arr -> arr
val rayleigh_ppf : sigma:arr -> arr -> arr
val rayleigh_sf : sigma:arr -> arr -> arr
val rayleigh_logsf : sigma:arr -> arr -> arr
val rayleigh_isf : sigma:arr -> arr -> arr
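Each distribution above exposes the same vectorised family: _rvs draws n samples, _pdf/_logpdf evaluate the density, _cdf/_sf (and their log variants) the distribution and survival functions, and _ppf/_isf their inverses. The parameters are themselves ndarrays and are evaluated element-wise against the input. A minimal sketch, again assuming the double-precision Owl.Arr module:
let () =
  let mu = Owl.Arr.zeros [| 5 |] in
  let sigma = Owl.Arr.ones [| 5 |] in
  let x = Owl.Arr.linspace (-2.) 2. 5 in
  let pdf = Owl.Arr.gaussian_pdf ~mu ~sigma x in   (* element-wise N(0,1) density at x *)
  let cdf = Owl.Arr.gaussian_cdf ~mu ~sigma x in   (* element-wise N(0,1) CDF at x *)
  Owl.Arr.print pdf;
  Owl.Arr.print cdf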
include module type of struct include Owl_dense_ndarray.Operator end
include sig ... end
val (+$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (-$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (*$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (/$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($+) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($-) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($*) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($/) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val (=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (!=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (<>) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (>) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (<) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (>=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (<=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
include sig ... end
val (=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (!=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (<>$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (<$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (>$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (<=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (>=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (!=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (<>.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (<.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (>.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (<=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (>=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (=~) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (=~$) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (=~.) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val (=~.$) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (%) : (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t
val (%$) : (float, 'a) Owl_dense_ndarray_generic.t -> float -> (float, 'a) Owl_dense_ndarray_generic.t
val (**) : (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t
val ($**) : float -> (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t
val (**$) : (float, 'a) Owl_dense_ndarray_generic.t -> float -> (float, 'a) Owl_dense_ndarray_generic.t
val (+=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (-=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (*=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (/=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (+$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (-$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (*$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (/$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (.!{;..}) : ('a, 'b) Owl_dense_ndarray_generic.t -> Owl_types.index array -> ('a, 'b) Owl_dense_ndarray_generic.t
val (.!{;..}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> Owl_types.index array -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (.${}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list -> ('a, 'b) Owl_dense_ndarray_generic.t
val (.${;..}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list array -> ('a, 'b) Owl_dense_ndarray_generic.t
val (.${}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (.${;..}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list array -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
include sig ... end
val (.%{}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int -> 'a
val (.%{;..}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int array -> 'a
val (.%{}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int -> 'a -> unit
val (.%{;..}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int array -> 'a -> unit
val mpow : Owl_linalg_d.mat -> float -> Owl_linalg_d.mat
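The operators above are Owl's usual shorthand: a trailing $ marks a scalar right operand, a leading $ a scalar left operand, the = family mutates the left operand in place, and =~ compares with a tolerance. A small hedged sketch of how they read in practice (assuming this module is Owl.Arr):
let () =
  let open Owl.Arr in
  let x = sequential [| 3; 3 |] in
  let y = x +$ 10. in          (* add a scalar to every element *)
  let z = 2. $* y in           (* scalar on the left *)
  x += y;                      (* in-place element-wise addition into x *)
  Printf.printf "close? %b\n" (z =~ (y *$ 2.))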
\ No newline at end of file diff --git a/owl/Owl/Mat/index.html b/owl/Owl/Mat/index.html deleted file mode 100644 index dbae98797..000000000 --- a/owl/Owl/Mat/index.html +++ /dev/null @@ -1,225 +0,0 @@ - -Mat (owl.Owl.Mat)

Module Owl.Mat

include module type of struct include Owl_dense.Matrix.D end
include module type of struct include Owl_dense_matrix_d end
type elt = float
type mat = (float, Stdlib.Bigarray.float64_elt) Owl_dense_matrix_generic.t
include Owl_dense_matrix_intf.Common with type elt := elt and type mat := mat
Create dense matrices
val empty : int -> int -> mat
val create : int -> int -> elt -> mat
val init : int -> int -> (int -> elt) -> mat
val init_2d : int -> int -> (int -> int -> elt) -> mat
val zeros : int -> int -> mat
val ones : int -> int -> mat
val eye : int -> mat
val sequential : ?a:elt -> ?step:elt -> int -> int -> mat
val uniform : ?a:elt -> ?b:elt -> int -> int -> mat
val gaussian : ?mu:elt -> ?sigma:elt -> int -> int -> mat
val bernoulli : ?p:float -> int -> int -> mat
val unit_basis : int -> int -> mat
val diagm : ?k:int -> mat -> mat
val triu : ?k:int -> mat -> mat
val tril : ?k:int -> mat -> mat
val symmetric : ?upper:bool -> mat -> mat
val bidiagonal : ?upper:bool -> mat -> mat -> mat
val toeplitz : ?c:mat -> mat -> mat
val hankel : ?r:mat -> mat -> mat
val hadamard : int -> mat
val magic : int -> mat
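These constructors build a double-precision matrix from row and column counts, or from an existing matrix for the structured variants such as triu, toeplitz and symmetric. For instance, a sketch:
let () =
  let open Owl in
  let a = Mat.eye 3 in                           (* 3x3 identity *)
  let b = Mat.gaussian ~mu:0. ~sigma:1. 3 3 in   (* random normal entries *)
  let c = Mat.sequential ~a:1. 3 3 in            (* 1., 2., ... filled row by row *)
  let u = Mat.triu c in                          (* upper-triangular part of c *)
  Mat.print a;
  Mat.print b;
  Mat.print u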
Dense row vectors and meshgrids
val vector : int -> mat
val vector_zeros : int -> mat
val vector_ones : int -> mat
val vector_uniform : int -> mat
val linspace : elt -> elt -> int -> mat
val logspace : ?base:float -> elt -> elt -> int -> mat
val meshgrid : elt -> elt -> elt -> elt -> int -> int -> mat * mat
val meshup : mat -> mat -> mat * mat
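The vector helpers return 1 x n row matrices, and meshgrid expands two coordinate ranges into a pair of matrices suitable for evaluating a function on a grid. A short sketch:
let () =
  let open Owl in
  let x = Mat.linspace 0. 1. 5 in                      (* 1x5 row vector: 0, 0.25, ..., 1 *)
  let xx, yy = Mat.meshgrid (-1.) 1. (-1.) 1. 4 4 in   (* 4x4 grid coordinates *)
  let r2 = Mat.(add (mul xx xx) (mul yy yy)) in        (* element-wise x^2 + y^2 over the grid *)
  Mat.print x;
  Mat.print r2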
Obtain the basic properties of a matrix
val shape : mat -> int * int
val row_num : mat -> int
val col_num : mat -> int
val numel : mat -> int
val nnz : mat -> int
val density : mat -> float
val size_in_bytes : mat -> int
val same_shape : mat -> mat -> bool
val same_data : mat -> mat -> bool
Manipulate a matrix
val get : mat -> int -> int -> elt
val set : mat -> int -> int -> elt -> unit
val get_index : mat -> int array array -> elt array
val set_index : mat -> int array array -> elt array -> unit
val get_fancy : Owl_types.index list -> mat -> mat
val set_fancy : Owl_types.index list -> mat -> mat -> unit
val get_slice : int list list -> mat -> mat
val set_slice : int list list -> mat -> mat -> unit
val row : mat -> int -> mat
val col : mat -> int -> mat
val rows : mat -> int array -> mat
val cols : mat -> int array -> mat
val resize : ?head:bool -> mat -> int array -> mat
val reshape : mat -> int array -> mat
val flatten : mat -> mat
val reverse : mat -> mat
val flip : ?axis:int -> mat -> mat
val rotate : mat -> int -> mat
val reset : mat -> unit
val fill : mat -> elt -> unit
val copy : mat -> mat
val copy_row_to : mat -> mat -> int -> unit
val copy_col_to : mat -> mat -> int -> unit
val concat_vertical : mat -> mat -> mat
val concat_horizontal : mat -> mat -> mat
val concat_vh : mat array array -> mat
val concatenate : ?axis:int -> mat array -> mat
val split : ?axis:int -> int array -> mat -> mat array
val split_vh : (int * int) array array -> mat -> mat array array
val transpose : mat -> mat
val ctranspose : mat -> mat
val swap_rows : mat -> int -> int -> unit
val swap_cols : mat -> int -> int -> unit
val tile : mat -> int array -> mat
val repeat : mat -> int array -> mat
val pad : ?v:elt -> int list list -> mat -> mat
val dropout : ?rate:float -> mat -> mat
val top : mat -> int -> int array array
val bottom : mat -> int -> int array array
val sort : mat -> mat
val argsort : mat -> (int64, Stdlib.Bigarray.int64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
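Most of the manipulation functions either address sub-blocks (get/set, row/col, get_slice with one int list per dimension) or restructure the matrix (reshape, transpose, concatenation, tiling). A hedged sketch of typical use:
let () =
  let open Owl in
  let m = Mat.sequential 4 5 in
  let v = Mat.get m 1 2 in                       (* single element *)
  let r = Mat.row m 0 in                         (* first row as a 1x5 matrix *)
  let s = Mat.get_slice [ [0; 1]; [2; 4] ] m in  (* rows 0-1, columns 2-4 *)
  let t = Mat.concat_horizontal r (Mat.row m 3) in
  Printf.printf "m.(1,2) = %g\n" v;
  Mat.print s;
  Mat.print (Mat.transpose t)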
Iterate elements, columns, and rows.
val iteri : (int -> elt -> unit) -> mat -> unit
val iter : (elt -> unit) -> mat -> unit
val mapi : (int -> elt -> elt) -> mat -> mat
val map : (elt -> elt) -> mat -> mat
val foldi : ?axis:int -> (int -> elt -> elt -> elt) -> elt -> mat -> mat
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> mat -> mat
val scani : ?axis:int -> (int -> elt -> elt -> elt) -> mat -> mat
val scan : ?axis:int -> (elt -> elt -> elt) -> mat -> mat
val filteri : (int -> elt -> bool) -> mat -> int array
val filter : (elt -> bool) -> mat -> int array
val iteri_2d : (int -> int -> elt -> unit) -> mat -> unit
val mapi_2d : (int -> int -> elt -> elt) -> mat -> mat
val foldi_2d : ?axis:int -> (int -> int -> elt -> elt -> elt) -> elt -> mat -> mat
val scani_2d : ?axis:int -> (int -> int -> elt -> elt -> elt) -> mat -> mat
val filteri_2d : (int -> int -> elt -> bool) -> mat -> (int * int) array
val iter2i_2d : (int -> int -> elt -> elt -> unit) -> mat -> mat -> unit
val map2i_2d : (int -> int -> elt -> elt -> elt) -> mat -> mat -> mat
val iter2i : (int -> elt -> elt -> unit) -> mat -> mat -> unit
val iter2 : (elt -> elt -> unit) -> mat -> mat -> unit
val map2i : (int -> elt -> elt -> elt) -> mat -> mat -> mat
val map2 : (elt -> elt -> elt) -> mat -> mat -> mat
val iteri_rows : (int -> mat -> unit) -> mat -> unit
val iter_rows : (mat -> unit) -> mat -> unit
val iter2i_rows : (int -> mat -> mat -> unit) -> mat -> mat -> unit
val iter2_rows : (mat -> mat -> unit) -> mat -> mat -> unit
val iteri_cols : (int -> mat -> unit) -> mat -> unit
val iter_cols : (mat -> unit) -> mat -> unit
val filteri_rows : (int -> mat -> bool) -> mat -> int array
val filter_rows : (mat -> bool) -> mat -> int array
val filteri_cols : (int -> mat -> bool) -> mat -> int array
val filter_cols : (mat -> bool) -> mat -> int array
val fold_rows : ('a -> mat -> 'a) -> 'a -> mat -> 'a
val fold_cols : ('a -> mat -> 'a) -> 'a -> mat -> 'a
val mapi_rows : (int -> mat -> 'a) -> mat -> 'a array
val map_rows : (mat -> 'a) -> mat -> 'a array
val mapi_cols : (int -> mat -> 'a) -> mat -> 'a array
val map_cols : (mat -> 'a) -> mat -> 'a array
val mapi_by_row : int -> (int -> mat -> mat) -> mat -> mat
val map_by_row : int -> (mat -> mat) -> mat -> mat
val mapi_by_col : int -> (int -> mat -> mat) -> mat -> mat
val map_by_col : int -> (mat -> mat) -> mat -> mat
val mapi_at_row : (int -> elt -> elt) -> mat -> int -> mat
val map_at_row : (elt -> elt) -> mat -> int -> mat
val mapi_at_col : (int -> elt -> elt) -> mat -> int -> mat
val map_at_col : (elt -> elt) -> mat -> int -> mat
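The iteration functions come in element-wise flavours (iter/map/fold over a flat index), 2-d flavours (the _2d variants, which pass row and column indices), and row/column-wise flavours. For example, a sketch:
let () =
  let open Owl in
  let m = Mat.uniform 3 4 in
  let doubled = Mat.map (fun x -> 2. *. x) m in
  let idx = Mat.filter (fun x -> x > 0.5) m in    (* flat indices of the large entries *)
  Mat.iteri_rows (fun i r ->
      Printf.printf "row %d sum = %g\n" i (Mat.sum' r)) m;
  Printf.printf "%d entries > 0.5\n" (Array.length idx);
  Mat.print doubled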
Examine elements and compare two matrices
val exists : (elt -> bool) -> mat -> bool
val not_exists : (elt -> bool) -> mat -> bool
val for_all : (elt -> bool) -> mat -> bool
val is_zero : mat -> bool
val is_positive : mat -> bool
val is_negative : mat -> bool
val is_nonpositive : mat -> bool
val is_nonnegative : mat -> bool
val is_normal : mat -> bool
val not_nan : mat -> bool
val not_inf : mat -> bool
val equal : mat -> mat -> bool
val not_equal : mat -> mat -> bool
val greater : mat -> mat -> bool
val less : mat -> mat -> bool
val greater_equal : mat -> mat -> bool
val less_equal : mat -> mat -> bool
val elt_equal : mat -> mat -> mat
val elt_not_equal : mat -> mat -> mat
val elt_less : mat -> mat -> mat
val elt_greater : mat -> mat -> mat
val elt_less_equal : mat -> mat -> mat
val elt_greater_equal : mat -> mat -> mat
val equal_scalar : mat -> elt -> bool
val not_equal_scalar : mat -> elt -> bool
val less_scalar : mat -> elt -> bool
val greater_scalar : mat -> elt -> bool
val less_equal_scalar : mat -> elt -> bool
val greater_equal_scalar : mat -> elt -> bool
val elt_equal_scalar : mat -> elt -> mat
val elt_not_equal_scalar : mat -> elt -> mat
val elt_less_scalar : mat -> elt -> mat
val elt_greater_scalar : mat -> elt -> mat
val elt_less_equal_scalar : mat -> elt -> mat
val elt_greater_equal_scalar : mat -> elt -> mat
val approx_equal : ?eps:float -> mat -> mat -> bool
val approx_equal_scalar : ?eps:float -> mat -> elt -> bool
val approx_elt_equal : ?eps:float -> mat -> mat -> mat
val approx_elt_equal_scalar : ?eps:float -> mat -> elt -> mat
Randomisation functions
val draw_rows : ?replacement:bool -> mat -> int -> mat * int array
val draw_cols : ?replacement:bool -> mat -> int -> mat * int array
val draw_rows2 : ?replacement:bool -> mat -> mat -> int -> mat * mat * int array
val draw_cols2 : ?replacement:bool -> mat -> mat -> int -> mat * mat * int array
val shuffle_rows : mat -> mat
val shuffle_cols : mat -> mat
val shuffle : mat -> mat
Input/Output and helper functions
val to_array : mat -> elt array
val of_array : elt array -> int -> int -> mat
val to_arrays : mat -> elt array array
val of_arrays : elt array array -> mat
val to_rows : mat -> mat array
val of_rows : mat array -> mat
val to_cols : mat -> mat array
val of_cols : mat array -> mat
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> mat -> unit
val save : out:string -> mat -> unit
val load : string -> mat
val save_txt : ?sep:string -> ?append:bool -> out:string -> mat -> unit
val load_txt : ?sep:string -> string -> mat
val save_npy : out:string -> mat -> unit
val load_npy : string -> mat
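save/load use Owl's own binary format, save_txt/load_txt read and write plain delimited text, and save_npy/load_npy exchange data with NumPy. A small sketch:
let () =
  let open Owl in
  let m = Mat.uniform 100 3 in
  Mat.save_txt ~sep:"," ~out:"data.csv" m;       (* comma-separated text *)
  let m' = Mat.load_txt ~sep:"," "data.csv" in
  Mat.save_npy ~out:"data.npy" m;                (* readable from NumPy *)
  Printf.printf "round trip ok: %b\n" (Mat.approx_equal m m')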
Unary mathematical operations
val min : ?axis:int -> ?keep_dims:bool -> mat -> mat
val min' : mat -> elt
val max : ?axis:int -> ?keep_dims:bool -> mat -> mat
val max' : mat -> elt
val minmax : ?axis:int -> ?keep_dims:bool -> mat -> mat * mat
val minmax' : mat -> elt * elt
val min_i : mat -> elt * int array
val max_i : mat -> elt * int array
val minmax_i : mat -> (elt * int array) * (elt * int array)
val trace : mat -> elt
val sum : ?axis:int -> ?keep_dims:bool -> mat -> mat
val sum' : mat -> elt
val prod : ?axis:int -> ?keep_dims:bool -> mat -> mat
val prod' : mat -> elt
val mean : ?axis:int -> ?keep_dims:bool -> mat -> mat
val mean' : mat -> elt
val var' : mat -> elt
val std' : mat -> elt
val sem : ?axis:int -> ?keep_dims:bool -> mat -> mat
val sem' : mat -> elt
val sum_rows : ?keep_dims:bool -> mat -> mat
val sum_cols : ?keep_dims:bool -> mat -> mat
val mean_rows : ?keep_dims:bool -> mat -> mat
val mean_cols : ?keep_dims:bool -> mat -> mat
val abs : mat -> mat
val abs2 : mat -> mat
val conj : mat -> mat
val neg : mat -> mat
val reci : mat -> mat
val reci_tol : ?tol:elt -> mat -> mat
val sqr : mat -> mat
val sqrt : mat -> mat
val cbrt : mat -> mat
val exp : mat -> mat
val exp2 : mat -> mat
val exp10 : mat -> mat
val expm1 : mat -> mat
val log : mat -> mat
val log10 : mat -> mat
val log2 : mat -> mat
val log1p : mat -> mat
val sin : mat -> mat
val cos : mat -> mat
val tan : mat -> mat
val asin : mat -> mat
val acos : mat -> mat
val atan : mat -> mat
val sinh : mat -> mat
val cosh : mat -> mat
val tanh : mat -> mat
val asinh : mat -> mat
val acosh : mat -> mat
val atanh : mat -> mat
val floor : mat -> mat
val ceil : mat -> mat
val round : mat -> mat
val trunc : mat -> mat
val fix : mat -> mat
val modf : mat -> mat * mat
val l1norm : ?axis:int -> ?keep_dims:bool -> mat -> mat
val l1norm' : mat -> elt
val l2norm : ?axis:int -> ?keep_dims:bool -> mat -> mat
val l2norm' : mat -> elt
val l2norm_sqr : ?axis:int -> ?keep_dims:bool -> mat -> mat
val l2norm_sqr' : mat -> elt
val vecnorm : ?axis:int -> ?p:float -> ?keep_dims:bool -> mat -> mat
val vecnorm' : ?p:float -> mat -> elt
val cumsum : ?axis:int -> mat -> mat
val cumprod : ?axis:int -> mat -> mat
val cummin : ?axis:int -> mat -> mat
val cummax : ?axis:int -> mat -> mat
val diff : ?axis:int -> ?n:int -> mat -> mat
val var : ?axis:int -> ?keep_dims:bool -> mat -> mat
val std : ?axis:int -> ?keep_dims:bool -> mat -> mat
val mat2gray : ?amin:elt -> ?amax:elt -> mat -> mat
val lgamma : mat -> mat
val dawsn : mat -> mat
Binary mathematical operations
val add : mat -> mat -> mat
val sub : mat -> mat -> mat
val mul : mat -> mat -> mat
val div : mat -> mat -> mat
val add_scalar : mat -> elt -> mat
val sub_scalar : mat -> elt -> mat
val mul_scalar : mat -> elt -> mat
val div_scalar : mat -> elt -> mat
val scalar_add : elt -> mat -> mat
val scalar_sub : elt -> mat -> mat
val scalar_mul : elt -> mat -> mat
val scalar_div : elt -> mat -> mat
val dot : mat -> mat -> mat
val add_diag : mat -> elt -> mat
val pow : mat -> mat -> mat
val scalar_pow : elt -> mat -> mat
val pow_scalar : mat -> elt -> mat
val min2 : mat -> mat -> mat
val max2 : mat -> mat -> mat
val ssqr' : mat -> elt -> elt
val ssqr_diff' : mat -> mat -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> mat -> mat
val cov : ?b:mat -> a:mat -> mat
val kron : mat -> mat -> mat
val fma : mat -> mat -> mat -> mat
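Besides the element-wise arithmetic and its scalar variants, this group contains the linear-algebra style operations: dot is matrix multiplication, kron the Kronecker product, and fma a fused multiply-add. A sketch:
let () =
  let open Owl in
  let a = Mat.uniform 3 4 in
  let b = Mat.uniform 4 2 in
  let c = Mat.dot a b in                       (* 3x2 matrix product *)
  let d = Mat.add_scalar (Mat.mul a a) 1. in   (* element-wise a*a + 1 *)
  Mat.print c;
  Mat.print d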
Functions of in-place modification
val create_ : out:mat -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:mat -> unit
val bernoulli_ : ?p:float -> out:mat -> unit
val zeros_ : out:mat -> unit
val ones_ : out:mat -> unit
val sort_ : mat -> unit
val one_hot_ : out:mat -> int -> mat -> unit
val copy_ : out:mat -> mat -> unit
val reshape_ : out:mat -> mat -> unit
val transpose_ : out:mat -> ?axis:int array -> mat -> unit
val sum_ : out:mat -> axis:int -> mat -> unit
val min_ : out:mat -> axis:int -> mat -> unit
val max_ : out:mat -> axis:int -> mat -> unit
val add_ : ?out:mat -> mat -> mat -> unit
val sub_ : ?out:mat -> mat -> mat -> unit
val mul_ : ?out:mat -> mat -> mat -> unit
val div_ : ?out:mat -> mat -> mat -> unit
val pow_ : ?out:mat -> mat -> mat -> unit
val atan2_ : ?out:mat -> mat -> mat -> unit
val hypot_ : ?out:mat -> mat -> mat -> unit
val fmod_ : ?out:mat -> mat -> mat -> unit
val min2_ : ?out:mat -> mat -> mat -> unit
val max2_ : ?out:mat -> mat -> mat -> unit
val add_scalar_ : ?out:mat -> mat -> elt -> unit
val sub_scalar_ : ?out:mat -> mat -> elt -> unit
val mul_scalar_ : ?out:mat -> mat -> elt -> unit
val div_scalar_ : ?out:mat -> mat -> elt -> unit
val pow_scalar_ : ?out:mat -> mat -> elt -> unit
val atan2_scalar_ : ?out:mat -> mat -> elt -> unit
val fmod_scalar_ : ?out:mat -> mat -> elt -> unit
val scalar_add_ : ?out:mat -> elt -> mat -> unit
val scalar_sub_ : ?out:mat -> elt -> mat -> unit
val scalar_mul_ : ?out:mat -> elt -> mat -> unit
val scalar_div_ : ?out:mat -> elt -> mat -> unit
val scalar_pow_ : ?out:mat -> elt -> mat -> unit
val scalar_atan2_ : ?out:mat -> elt -> mat -> unit
val scalar_fmod_ : ?out:mat -> elt -> mat -> unit
val fma_ : ?out:mat -> mat -> mat -> mat -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:mat -> mat -> mat -> unit
val conj_ : ?out:mat -> mat -> unit
val abs_ : ?out:mat -> mat -> unit
val neg_ : ?out:mat -> mat -> unit
val reci_ : ?out:mat -> mat -> unit
val signum_ : ?out:mat -> mat -> unit
val sqr_ : ?out:mat -> mat -> unit
val sqrt_ : ?out:mat -> mat -> unit
val cbrt_ : ?out:mat -> mat -> unit
val exp_ : ?out:mat -> mat -> unit
val exp2_ : ?out:mat -> mat -> unit
val exp10_ : ?out:mat -> mat -> unit
val expm1_ : ?out:mat -> mat -> unit
val log_ : ?out:mat -> mat -> unit
val log2_ : ?out:mat -> mat -> unit
val log10_ : ?out:mat -> mat -> unit
val log1p_ : ?out:mat -> mat -> unit
val sin_ : ?out:mat -> mat -> unit
val cos_ : ?out:mat -> mat -> unit
val tan_ : ?out:mat -> mat -> unit
val asin_ : ?out:mat -> mat -> unit
val acos_ : ?out:mat -> mat -> unit
val atan_ : ?out:mat -> mat -> unit
val sinh_ : ?out:mat -> mat -> unit
val cosh_ : ?out:mat -> mat -> unit
val tanh_ : ?out:mat -> mat -> unit
val asinh_ : ?out:mat -> mat -> unit
val acosh_ : ?out:mat -> mat -> unit
val atanh_ : ?out:mat -> mat -> unit
val floor_ : ?out:mat -> mat -> unit
val ceil_ : ?out:mat -> mat -> unit
val round_ : ?out:mat -> mat -> unit
val trunc_ : ?out:mat -> mat -> unit
val fix_ : ?out:mat -> mat -> unit
val erf_ : ?out:mat -> mat -> unit
val erfc_ : ?out:mat -> mat -> unit
val relu_ : ?out:mat -> mat -> unit
val softplus_ : ?out:mat -> mat -> unit
val softsign_ : ?out:mat -> mat -> unit
val sigmoid_ : ?out:mat -> mat -> unit
val softmax_ : ?out:mat -> ?axis:int -> mat -> unit
val cumsum_ : ?out:mat -> ?axis:int -> mat -> unit
val cumprod_ : ?out:mat -> ?axis:int -> mat -> unit
val cummin_ : ?out:mat -> ?axis:int -> mat -> unit
val cummax_ : ?out:mat -> ?axis:int -> mat -> unit
val dropout_ : ?out:mat -> ?rate:float -> mat -> unit
val elt_equal_ : ?out:mat -> mat -> mat -> unit
val elt_not_equal_ : ?out:mat -> mat -> mat -> unit
val elt_less_ : ?out:mat -> mat -> mat -> unit
val elt_greater_ : ?out:mat -> mat -> mat -> unit
val elt_less_equal_ : ?out:mat -> mat -> mat -> unit
val elt_greater_equal_ : ?out:mat -> mat -> mat -> unit
val elt_equal_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_not_equal_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_less_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_greater_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_less_equal_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_greater_equal_scalar_ : ?out:mat -> mat -> elt -> unit
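The trailing-underscore functions avoid allocating a result: they write into ~out, or, for the functions where ~out is optional, into the first matrix argument when ~out is omitted. That last point is my reading of Owl's convention rather than something stated in this listing, so treat the sketch as illustrative:
let () =
  let open Owl in
  let x = Mat.uniform 3 3 in
  let y = Mat.uniform 3 3 in
  let out = Mat.zeros 3 3 in
  Mat.add_ ~out x y;        (* out <- x + y, no fresh allocation *)
  Mat.sqrt_ ~out out;       (* out <- sqrt out, element-wise *)
  Mat.add_scalar_ x 1.;     (* x <- x + 1 in place (assuming the no-out convention) *)
  Mat.print out;
  Mat.print x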
include Owl_dense_matrix_intf.Real with type elt := elt and type mat := mat
Specific real functions
val i0 : mat -> mat
val i0e : mat -> mat
val i1 : mat -> mat
val i1e : mat -> mat
val iv : v:mat -> mat -> mat
val scalar_iv : v:elt -> mat -> mat
val iv_scalar : v:mat -> elt -> mat
val j0 : mat -> mat
val j1 : mat -> mat
val jv : v:mat -> mat -> mat
val scalar_jv : v:elt -> mat -> mat
val jv_scalar : v:mat -> elt -> mat
val semidef : int -> mat
val min_rows : mat -> (elt * int * int) array
val min_cols : mat -> (elt * int * int) array
val max_rows : mat -> (elt * int * int) array
val max_cols : mat -> (elt * int * int) array
val signum : mat -> mat
val erf : mat -> mat
val erfc : mat -> mat
val logistic : mat -> mat
val relu : mat -> mat
val elu : ?alpha:elt -> mat -> mat
val leaky_relu : ?alpha:elt -> mat -> mat
val softplus : mat -> mat
val softsign : mat -> mat
val softmax : ?axis:int -> mat -> mat
val sigmoid : mat -> mat
val log_sum_exp' : mat -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> mat -> mat
val max_pool : ?padding:Owl_types.padding -> mat -> int array -> int array -> mat
val avg_pool : ?padding:Owl_types.padding -> mat -> int array -> int array -> mat
val atan2 : mat -> mat -> mat
val scalar_atan2 : elt -> mat -> mat
val atan2_scalar : mat -> elt -> mat
val hypot : mat -> mat -> mat
val fmod : mat -> mat -> mat
val fmod_scalar : mat -> elt -> mat
val scalar_fmod : elt -> mat -> mat
val cross_entropy' : mat -> mat -> elt
val clip_by_l2norm : elt -> mat -> mat
val poisson : mu:elt -> int -> int -> mat
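The real-only extras are mainly Bessel functions (i0/i1/j0/j1 and the iv/jv families) plus neural-network style activations. A sketch of the activations:
let () =
  let open Owl in
  let m = Mat.gaussian 2 5 in
  let a = Mat.relu m in               (* max(x, 0) element-wise *)
  let p = Mat.softmax ~axis:1 m in    (* each row normalised to sum to 1 *)
  let s = Mat.sigmoid m in
  Mat.print a;
  Mat.print p;
  Mat.print s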
include module type of struct include Owl_dense_matrix.Operator end
include sig ... end
val (+$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (-$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (*$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (/$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val ($+) : 'a -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val ($-) : 'a -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val ($*) : 'a -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val ($/) : 'a -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (!=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (<>) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (>) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (<) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (>=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (<=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
include sig ... end
val (=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (!=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (<>$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (<$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (>$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (<=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (>=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (=.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (!=.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (<>.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (<.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (>.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (<=.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (>=.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (=.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (!=.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (<>.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (<.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (>.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (<=.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (>=.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (=~) : ?eps:float -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (=~$) : ?eps:float -> ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (=~.) : ?eps:float -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (=~.$) : ?eps:float -> ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (%) : (float, 'a) Owl_dense_matrix_generic.t -> (float, 'a) Owl_dense_matrix_generic.t -> (float, 'a) Owl_dense_matrix_generic.t
val (%$) : (float, 'a) Owl_dense_matrix_generic.t -> float -> (float, 'a) Owl_dense_matrix_generic.t
val (**) : (float, 'a) Owl_dense_matrix_generic.t -> (float, 'a) Owl_dense_matrix_generic.t -> (float, 'a) Owl_dense_matrix_generic.t
val ($**) : float -> (float, 'a) Owl_dense_matrix_generic.t -> (float, 'a) Owl_dense_matrix_generic.t
val (**$) : (float, 'a) Owl_dense_matrix_generic.t -> float -> (float, 'a) Owl_dense_matrix_generic.t
val (+=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
val (-=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
val (*=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
val (/=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
val (+$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (-$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (*$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (/$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (@=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (@||) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (.!{;..}) : ('a, 'b) Owl_dense_matrix_generic.t -> Owl_types.index array -> ('a, 'b) Owl_dense_matrix_generic.t
val (.!{;..}<-) : ('a, 'b) Owl_dense_matrix_generic.t -> Owl_types.index array -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
val (.${}) : ('a, 'b) Owl_dense_matrix_generic.t -> int list -> ('a, 'b) Owl_dense_matrix_generic.t
val (.${;..}) : ('a, 'b) Owl_dense_matrix_generic.t -> int list array -> ('a, 'b) Owl_dense_matrix_generic.t
val (.${}<-) : ('a, 'b) Owl_dense_matrix_generic.t -> int list -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
val (.${;..}<-) : ('a, 'b) Owl_dense_matrix_generic.t -> int list array -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
include sig ... end
val (*@) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (.%{}) : ('a, 'b) Owl_dense_matrix_generic.t -> (int * int) -> 'a
val (.%{;..}) : ('a, 'b) Owl_dense_matrix_generic.t -> int array -> 'a
val (.%{}<-) : ('a, 'b) Owl_dense_matrix_generic.t -> (int * int) -> 'a -> unit
val (.%{;..}<-) : ('a, 'b) Owl_dense_matrix_generic.t -> int array -> 'a -> unit
include sig ... end
val (**@) : ('a, 'b) Owl_linalg_generic.t -> float -> ('a, 'b) Owl_linalg_generic.t
val (/@) : ('a, 'b) Owl_linalg_generic.t -> ('a, 'b) Owl_linalg_generic.t -> ('a, 'b) Owl_linalg_generic.t
val mpow : Owl_linalg_d.mat -> float -> Owl_linalg_d.mat
val diag : ?k:int -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
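On top of the ndarray operators, matrices gain *@ for matrix multiplication, @= and @|| for vertical and horizontal concatenation, and **@ / mpow for matrix powers. A hedged sketch:
let () =
  let open Owl in
  let a = Mat.uniform 3 3 in
  let b = Mat.uniform 3 3 in
  let c = Mat.(a *@ b) in       (* matrix product *)
  let d = Mat.(a @= b) in       (* 6x3: a stacked on top of b *)
  let p = Mat.(a **@ 2.) in     (* matrix power, same as mpow a 2. *)
  Mat.print c;
  Mat.print d;
  Mat.print p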
\ No newline at end of file diff --git a/owl/Owl_algodiff/.dummy b/owl/Owl_algodiff/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_algodiff/D/A/Linalg/index.html b/owl/Owl_algodiff/D/A/Linalg/index.html deleted file mode 100644 index 39845981f..000000000 --- a/owl/Owl_algodiff/D/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl.Owl_algodiff.D.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl/Owl_algodiff/D/A/Mat/index.html b/owl/Owl_algodiff/D/A/Mat/index.html deleted file mode 100644 index 6f8d4eddd..000000000 --- a/owl/Owl_algodiff/D/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_algodiff.D.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl/Owl_algodiff/D/A/Scalar/index.html b/owl/Owl_algodiff/D/A/Scalar/index.html deleted file mode 100644 index c600b538e..000000000 --- a/owl/Owl_algodiff/D/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl.Owl_algodiff.D.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl/Owl_algodiff/D/A/index.html b/owl/Owl_algodiff/D/A/index.html deleted file mode 100644 index b1e60d404..000000000 --- a/owl/Owl_algodiff/D/A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl.Owl_algodiff.D.A)

Module D.A

val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl/Owl_algodiff/D/Arr/index.html b/owl/Owl_algodiff/D/Arr/index.html deleted file mode 100644 index 853bd0ac9..000000000 --- a/owl/Owl_algodiff/D/Arr/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Arr (owl.Owl_algodiff.D.Arr)

Module D.Arr

val empty : int array -> t
val zeros : int array -> t
val ones : int array -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int array -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int array -> t
val shape : t -> int array
val numel : t -> int
val reset : t -> unit
val reshape : t -> int array -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
\ No newline at end of file diff --git a/owl/Owl_algodiff/D/Builder/index.html b/owl/Owl_algodiff/D/Builder/index.html deleted file mode 100644 index 4cee77e3e..000000000 --- a/owl/Owl_algodiff/D/Builder/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Builder (owl.Owl_algodiff.D.Builder)

Module D.Builder

module type Siso = sig ... end
val build_siso : (module Siso) -> t -> t
module type Sipo = sig ... end
val build_sipo : (module Sipo) -> t -> t * t
module type Sito = sig ... end
val build_sito : (module Sito) -> t -> t * t * t
module type Siao = sig ... end
val build_siao : (module Siao) -> t -> t array
module type Piso = sig ... end
val build_piso : (module Piso) -> t -> t -> t
module type Aiso = sig ... end
val build_aiso : (module Aiso) -> t array -> t
\ No newline at end of file diff --git a/owl/Owl_algodiff/D/Builder/module-type-Aiso/index.html b/owl/Owl_algodiff/D/Builder/module-type-Aiso/index.html deleted file mode 100644 index 561fbe61f..000000000 --- a/owl/Owl_algodiff/D/Builder/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl.Owl_algodiff.D.Builder.Aiso)

Module type Builder.Aiso

val label : string
val ff : t array -> t
val df : int list -> t -> t array -> t array -> t
val dr : int list -> t array -> t -> t Stdlib.ref -> t list
\ No newline at end of file diff --git a/owl/Owl_algodiff/D/Builder/module-type-Piso/index.html b/owl/Owl_algodiff/D/Builder/module-type-Piso/index.html deleted file mode 100644 index 774a89a4a..000000000 --- a/owl/Owl_algodiff/D/Builder/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl.Owl_algodiff.D.Builder.Piso)

Module type Builder.Piso

val label : string
val ff_aa : A.elt -> A.elt -> t
val ff_ab : A.elt -> A.arr -> t
val ff_ba : A.arr -> A.elt -> t
val ff_bb : A.arr -> A.arr -> t
val df_da : t -> t -> t -> t -> t
val df_db : t -> t -> t -> t -> t
val df_dab : t -> t -> t -> t -> t -> t
val dr_ab : t -> t -> t -> t Stdlib.ref -> t * t
val dr_a : t -> t -> t -> t Stdlib.ref -> t
val dr_b : t -> t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl/Owl_algodiff/D/Builder/module-type-Siao/index.html b/owl/Owl_algodiff/D/Builder/module-type-Siao/index.html deleted file mode 100644 index cd92b5520..000000000 --- a/owl/Owl_algodiff/D/Builder/module-type-Siao/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siao (owl.Owl_algodiff.D.Builder.Siao)

Module type Builder.Siao

val label : string
val ff_f : A.elt -> t array
val ff_arr : A.arr -> t array
val df : t array -> t -> t -> t array
val dr : t -> t -> t Stdlib.ref array -> t Stdlib.ref array -> t
\ No newline at end of file diff --git a/owl/Owl_algodiff/D/Builder/module-type-Sipo/index.html b/owl/Owl_algodiff/D/Builder/module-type-Sipo/index.html deleted file mode 100644 index 85c4d44d2..000000000 --- a/owl/Owl_algodiff/D/Builder/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl.Owl_algodiff.D.Builder.Sipo)

Module type Builder.Sipo

val label : string
val ff_f : A.elt -> t * t
val ff_arr : A.arr -> t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl/Owl_algodiff/D/Builder/module-type-Siso/index.html b/owl/Owl_algodiff/D/Builder/module-type-Siso/index.html deleted file mode 100644 index f7140b088..000000000 --- a/owl/Owl_algodiff/D/Builder/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl.Owl_algodiff.D.Builder.Siso)

Module type Builder.Siso

val label : string
val ff_f : A.elt -> t
val ff_arr : A.arr -> t
val df : t -> t -> t -> t
val dr : t -> t -> t Stdlib.ref -> t
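An illustrative sketch of how this signature can be filled in and handed to Builder.build_siso to obtain a custom unary operator. The argument order assumed below (df cp ap at and dr ap cp ca, where ap is the input primal and ca the adjoint reference of the output) mirrors Owl's built-in operators and should be treated as an assumption:

open Owl_algodiff.D

(* cube x = x ** 3, with a hand-written tangent rule (df) and adjoint rule (dr) *)
let cube =
  Builder.build_siso
    (module struct
      let label = "cube"
      let ff_f x = F A.Scalar.(mul x (mul x x))
      let ff_arr x = Arr A.(mul x (mul x x))
      let df _cp ap at = Maths.(at * _f 3. * ap * ap)
      let dr ap _cp ca = Maths.(!ca * _f 3. * ap * ap)
    end : Builder.Siso)

let () =
  (* d/dx x^3 at x = 2 should print 12 *)
  Printf.printf "%g\n" (unpack_flt (diff cube (pack_flt 2.)))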
\ No newline at end of file diff --git a/owl/Owl_algodiff/D/Builder/module-type-Sito/index.html b/owl/Owl_algodiff/D/Builder/module-type-Sito/index.html deleted file mode 100644 index 7643b893a..000000000 --- a/owl/Owl_algodiff/D/Builder/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl.Owl_algodiff.D.Builder.Sito)

Module type Builder.Sito

val label : string
val ff_f : A.elt -> t * t * t
val ff_arr : A.arr -> t * t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl/Owl_algodiff/D/Linalg/index.html b/owl/Owl_algodiff/D/Linalg/index.html deleted file mode 100644 index f58ee16aa..000000000 --- a/owl/Owl_algodiff/D/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl.Owl_algodiff.D.Linalg)

Module D.Linalg

val inv : t -> t
val logdet : t -> t
val chol : ?upper:bool -> t -> t
val qr : t -> t * t
val lq : t -> t * t
val svd : ?thin:bool -> t -> t * t * t
val sylvester : t -> t -> t -> t
val lyapunov : t -> t -> t
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> t -> t -> t
val (/@) : t -> t -> t
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> t -> t -> t
val care : ?diag_r:bool -> t -> t -> t -> t -> t
val dare : ?diag_r:bool -> t -> t -> t -> t -> t
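These routines operate on the AD type t, so they compose with the differentiation drivers; a small sketch using only functions listed in this documentation:

open Owl_algodiff.D

(* gradient of the log-determinant; analytically it is the transposed inverse of x *)
let f x = Linalg.logdet x
let x0 = Mat.eye 3
let g = grad f x0
let () = Mat.print g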
\ No newline at end of file diff --git a/owl/Owl_algodiff/D/Mat/index.html b/owl/Owl_algodiff/D/Mat/index.html deleted file mode 100644 index 85da2073f..000000000 --- a/owl/Owl_algodiff/D/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_algodiff.D.Mat)

Module D.Mat

val empty : int -> int -> t
val zeros : int -> int -> t
val eye : int -> t
val ones : int -> int -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int -> int -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int -> int -> t
val shape : t -> int * int
val numel : t -> int
val row_num : t -> int
val col_num : t -> int
val reset : t -> unit
val reshape : int -> int -> t -> t
val get : t -> int -> int -> t
val set : t -> int -> int -> t -> t
val row : t -> int -> t
val mean : t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
val map_by_row : (t -> t) -> t -> t
val of_arrays : A.elt array array -> t
val init_2d : int -> int -> (int -> int -> t) -> t
val print : t -> unit
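A brief sketch of the matrix constructors and reductions above; unpack_flt comes from the enclosing Owl_algodiff.D module, and mean is assumed to reduce over all entries to a scalar t:

open Owl_algodiff.D

let a = Mat.of_arrays [| [| 1.; 2. |]; [| 3.; 4. |] |]
let m = Mat.mean a
let () = Printf.printf "mean = %g\n" (unpack_flt m)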
\ No newline at end of file diff --git a/owl/Owl_algodiff/D/Maths/index.html b/owl/Owl_algodiff/D/Maths/index.html deleted file mode 100644 index 0e4e70b00..000000000 --- a/owl/Owl_algodiff/D/Maths/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Maths (owl.Owl_algodiff.D.Maths)

Module D.Maths

val (+) : t -> t -> t
val (-) : t -> t -> t
val (*) : t -> t -> t
val (/) : t -> t -> t
val (*@) : t -> t -> t
val (**) : t -> t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val kron : t -> t -> t
val dot : t -> t -> t
val pow : t -> t -> t
val atan2 : t -> t -> t
val min2 : t -> t -> t
val max2 : t -> t -> t
val cross_entropy : t -> t -> t
val inv : t -> t
val neg : t -> t
val abs : t -> t
val signum : t -> t
val floor : t -> t
val ceil : t -> t
val round : t -> t
val sqr : t -> t
val sqrt : t -> t
val log : t -> t
val log2 : t -> t
val log10 : t -> t
val exp : t -> t
val sin : t -> t
val cos : t -> t
val tan : t -> t
val sinh : t -> t
val cosh : t -> t
val tanh : t -> t
val asin : t -> t
val acos : t -> t
val atan : t -> t
val asinh : t -> t
val acosh : t -> t
val atanh : t -> t
val sum' : t -> t
val log_sum_exp' : t -> t
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> t -> t
val sum : ?axis:int -> ?keep_dims:bool -> t -> t
val sum_reduce : ?axis:int array -> t -> t
val mean : t -> t
val transpose : ?axis:int array -> t -> t
val swap : int -> int -> t -> t
val l1norm' : t -> t
val l2norm' : t -> t
val l2norm_sqr' : t -> t
val sigmoid : t -> t
val relu : t -> t
val dawsn : t -> t
val softplus : t -> t
val softsign : t -> t
val softmax : ?axis:int -> t -> t
val reshape : t -> int array -> t
val flatten : t -> t
val get_item : t -> int -> int -> t
val get_row : t -> int -> t
val concat : axis:int -> t -> t -> t
val split : axis:int -> int array -> t -> t array
val of_arrays : t array array -> t
val to_arrays : t -> t array array
val concatenate : axis:int -> t array -> t
val stack : axis:int -> t array -> t
val get_slice : int list list -> t -> t
val set_slice : int list list -> t -> t -> t
val get_fancy : Owl_types.index list -> t -> t
val set_fancy : Owl_types.index list -> t -> t -> t
val diag : ?k:int -> t -> t
val diagm : ?k:int -> t -> t
val trace : t -> t
val triu : ?k:int -> t -> t
val tril : ?k:int -> t -> t
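These operators are what user code typically writes inside the functions handed to the differentiation drivers; a minimal sketch:

open Owl_algodiff.D

(* f x = sin x * exp (-x^2), differentiated at x = 0.5 *)
let f x = Maths.(sin x * exp (neg (sqr x)))
let d = diff f (pack_flt 0.5)
let () = Printf.printf "f'(0.5) = %g\n" (unpack_flt d)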
\ No newline at end of file diff --git a/owl/Owl_algodiff/D/NN/index.html b/owl/Owl_algodiff/D/NN/index.html deleted file mode 100644 index 9f6fdaae4..000000000 --- a/owl/Owl_algodiff/D/NN/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -NN (owl.Owl_algodiff.D.NN)

Module D.NN

val dropout : ?rate:float -> t -> t
val conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val dilated_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val dilated_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val dilated_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val transpose_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val transpose_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val transpose_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val max_pool1d : Owl_types.padding -> t -> int array -> int array -> t
val max_pool2d : Owl_types.padding -> t -> int array -> int array -> t
val max_pool3d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool1d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool2d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool3d : Owl_types.padding -> t -> int array -> int array -> t
val upsampling2d : t -> int array -> t
val pad : ?v:A.elt -> int list list -> t -> t
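A sketch of the convolution and pooling primitives above. The layout assumed here (input [| batch; h; w; channels |], kernel [| kh; kw; in_channels; out_channels |], stride and pool size as two-element arrays, SAME / VALID as the Owl_types.padding constructors) follows Owl's usual conventions and should be checked against the library version in use:

open Owl_algodiff.D

let x = Arr.uniform [| 1; 28; 28; 1 |]    (* one 28x28 single-channel image *)
let k = Arr.gaussian [| 3; 3; 1; 8 |]     (* 3x3 kernel mapping 1 channel to 8 *)
let y = NN.conv2d ~padding:Owl_types.SAME x k [| 1; 1 |]
let z = NN.max_pool2d Owl_types.SAME y [| 2; 2 |] [| 2; 2 |]
let () = Array.iter (Printf.printf "%d ") (shape z); print_newline ()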
\ No newline at end of file diff --git a/owl/Owl_algodiff/D/index.html b/owl/Owl_algodiff/D/index.html deleted file mode 100644 index b98802275..000000000 --- a/owl/Owl_algodiff/D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -D (owl.Owl_algodiff.D)

Module Owl_algodiff.D

module A : sig ... end
type t = Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.D).t =
  | F of A.elt
  | Arr of A.arr
  | DF of t * t * int
  | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
val tag : unit -> int
val primal : t -> t
val primal' : t -> t
val zero : t -> t
val reset_zero : t -> t
val tangent : t -> t
val adjref : t -> t Stdlib.ref
val adjval : t -> t
val shape : t -> int array
val is_float : t -> bool
val is_arr : t -> bool
val row_num : t -> int
val col_num : t -> int
val numel : t -> int
val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t
val clip_by_l2norm : A.elt -> t -> t
val copy_primal' : t -> t
val tile : t -> int array -> t
val repeat : t -> int array -> t
val pack_elt : A.elt -> t
val unpack_elt : t -> A.elt
val pack_flt : float -> t
val _f : float -> t
val unpack_flt : t -> float
val pack_arr : A.arr -> t
val unpack_arr : t -> A.arr
val deep_info : t -> string
val type_info : t -> string
val error_binop : string -> t -> t -> 'a
val error_uniop : string -> t -> 'a
val make_forward : t -> t -> int -> t
val make_reverse : t -> int -> t
val reverse_prop : t -> t -> unit
val diff : (t -> t) -> t -> t
val diff' : (t -> t) -> t -> t * t
val grad : (t -> t) -> t -> t
val grad' : (t -> t) -> t -> t * t
val jacobian : (t -> t) -> t -> t
val jacobian' : (t -> t) -> t -> t * t
val jacobianv : (t -> t) -> t -> t -> t
val jacobianv' : (t -> t) -> t -> t -> t * t
val jacobianTv : (t -> t) -> t -> t -> t
val jacobianTv' : (t -> t) -> t -> t -> t * t
val hessian : (t -> t) -> t -> t
val hessian' : (t -> t) -> t -> t * t
val hessianv : (t -> t) -> t -> t -> t
val hessianv' : (t -> t) -> t -> t -> t * t
val laplacian : (t -> t) -> t -> t
val laplacian' : (t -> t) -> t -> t * t
val gradhessian : (t -> t) -> t -> t * t
val gradhessian' : (t -> t) -> t -> t * t * t
val gradhessianv : (t -> t) -> t -> t -> t * t
val gradhessianv' : (t -> t) -> t -> t -> t * t * t
module Builder : sig ... end
module Maths : sig ... end
module Linalg : sig ... end
module NN : sig ... end
module Mat : sig ... end
module Arr : sig ... end
val to_trace : t list -> string
val to_dot : t list -> string
val pp_num : Stdlib.Format.formatter -> t -> unit
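Taken together, the driver functions above support the usual AD workflow; a minimal sketch, assuming (as in Owl's examples) that vector inputs are passed as 1 x n row matrices:

open Owl_algodiff.D

(* scalar-valued function of a row vector *)
let f x = Maths.(sum' (sqr x) + sin (get_item x 0 0))

let x0 = Mat.uniform 1 3
let g = grad f x0        (* reverse-mode gradient, same shape as x0 *)
let h = hessian f x0     (* 3x3 Hessian *)
let () = Mat.print g; Mat.print h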
\ No newline at end of file diff --git a/owl/Owl_algodiff/S/A/Linalg/index.html b/owl/Owl_algodiff/S/A/Linalg/index.html deleted file mode 100644 index b11b63b81..000000000 --- a/owl/Owl_algodiff/S/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl.Owl_algodiff.S.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl/Owl_algodiff/S/A/Mat/index.html b/owl/Owl_algodiff/S/A/Mat/index.html deleted file mode 100644 index ec80d492d..000000000 --- a/owl/Owl_algodiff/S/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_algodiff.S.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl/Owl_algodiff/S/A/Scalar/index.html b/owl/Owl_algodiff/S/A/Scalar/index.html deleted file mode 100644 index 1ccc7f0ae..000000000 --- a/owl/Owl_algodiff/S/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl.Owl_algodiff.S.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
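These are the plain, non-differentiated scalar kernels the AD layer is built on. A quick sketch, using float_to_elt and elt_to_float from the enclosing A module to move between OCaml floats and elt:

open Owl_algodiff.S

let x = A.float_to_elt 1.2
let y = A.Scalar.(add (sigmoid x) (relu x))
let () = Printf.printf "%g\n" (A.elt_to_float y)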
\ No newline at end of file diff --git a/owl/Owl_algodiff/S/A/index.html b/owl/Owl_algodiff/S/A/index.html deleted file mode 100644 index 31b3c7bbe..000000000 --- a/owl/Owl_algodiff/S/A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl.Owl_algodiff.S.A)

Module S.A

val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
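A is the primal ndarray module underneath the AD layer (single precision in this instance); raw arrays built here can be lifted into the AD type with pack_arr from the enclosing module. A brief sketch:

open Owl_algodiff.S

let x = A.sequential [| 2; 3 |]              (* 0. 1. 2. / 3. 4. 5. *)
let r = A.get_slice [ [ 0 ]; [] ] x          (* first row *)
let r2 = A.mul_scalar r (A.float_to_elt 2.)
let y = pack_arr r2                          (* now a value of the AD type t *)
let () = Printf.printf "numel = %d\n" (numel y)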
\ No newline at end of file diff --git a/owl/Owl_algodiff/S/Arr/index.html b/owl/Owl_algodiff/S/Arr/index.html deleted file mode 100644 index ba3fe4620..000000000 --- a/owl/Owl_algodiff/S/Arr/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Arr (owl.Owl_algodiff.S.Arr)

Module S.Arr

val empty : int array -> t
val zeros : int array -> t
val ones : int array -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int array -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int array -> t
val shape : t -> int array
val numel : t -> int
val reset : t -> unit
val reshape : t -> int array -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
\ No newline at end of file diff --git a/owl/Owl_algodiff/S/Builder/index.html b/owl/Owl_algodiff/S/Builder/index.html deleted file mode 100644 index 5589d8e9c..000000000 --- a/owl/Owl_algodiff/S/Builder/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Builder (owl.Owl_algodiff.S.Builder)

Module S.Builder

module type Siso = sig ... end
val build_siso : (module Siso) -> t -> t
module type Sipo = sig ... end
val build_sipo : (module Sipo) -> t -> t * t
module type Sito = sig ... end
val build_sito : (module Sito) -> t -> t * t * t
module type Siao = sig ... end
val build_siao : (module Siao) -> t -> t array
module type Piso = sig ... end
val build_piso : (module Piso) -> t -> t -> t
module type Aiso = sig ... end
val build_aiso : (module Aiso) -> t array -> t
\ No newline at end of file diff --git a/owl/Owl_algodiff/S/Builder/module-type-Aiso/index.html b/owl/Owl_algodiff/S/Builder/module-type-Aiso/index.html deleted file mode 100644 index 19935b61b..000000000 --- a/owl/Owl_algodiff/S/Builder/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl.Owl_algodiff.S.Builder.Aiso)

Module type Builder.Aiso

val label : string
val ff : t array -> t
val df : int list -> t -> t array -> t array -> t
val dr : int list -> t array -> t -> t Stdlib.ref -> t list
\ No newline at end of file diff --git a/owl/Owl_algodiff/S/Builder/module-type-Piso/index.html b/owl/Owl_algodiff/S/Builder/module-type-Piso/index.html deleted file mode 100644 index 18d3e5c2c..000000000 --- a/owl/Owl_algodiff/S/Builder/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl.Owl_algodiff.S.Builder.Piso)

Module type Builder.Piso

val label : string
val ff_aa : A.elt -> A.elt -> t
val ff_ab : A.elt -> A.arr -> t
val ff_ba : A.arr -> A.elt -> t
val ff_bb : A.arr -> A.arr -> t
val df_da : t -> t -> t -> t -> t
val df_db : t -> t -> t -> t -> t
val df_dab : t -> t -> t -> t -> t -> t
val dr_ab : t -> t -> t -> t Stdlib.ref -> t * t
val dr_a : t -> t -> t -> t Stdlib.ref -> t
val dr_b : t -> t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl/Owl_algodiff/S/Builder/module-type-Siao/index.html b/owl/Owl_algodiff/S/Builder/module-type-Siao/index.html deleted file mode 100644 index 93e862fb7..000000000 --- a/owl/Owl_algodiff/S/Builder/module-type-Siao/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siao (owl.Owl_algodiff.S.Builder.Siao)

Module type Builder.Siao

val label : string
val ff_f : A.elt -> t array
val ff_arr : A.arr -> t array
val df : t array -> t -> t -> t array
val dr : t -> t -> t Stdlib.ref array -> t Stdlib.ref array -> t
\ No newline at end of file diff --git a/owl/Owl_algodiff/S/Builder/module-type-Sipo/index.html b/owl/Owl_algodiff/S/Builder/module-type-Sipo/index.html deleted file mode 100644 index 5265b1a62..000000000 --- a/owl/Owl_algodiff/S/Builder/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl.Owl_algodiff.S.Builder.Sipo)

Module type Builder.Sipo

val label : string
val ff_f : A.elt -> t * t
val ff_arr : A.arr -> t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl/Owl_algodiff/S/Builder/module-type-Siso/index.html b/owl/Owl_algodiff/S/Builder/module-type-Siso/index.html deleted file mode 100644 index 65e6f1ef7..000000000 --- a/owl/Owl_algodiff/S/Builder/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl.Owl_algodiff.S.Builder.Siso)

Module type Builder.Siso

val label : string
val ff_f : A.elt -> t
val ff_arr : A.arr -> t
val df : t -> t -> t -> t
val dr : t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl/Owl_algodiff/S/Builder/module-type-Sito/index.html b/owl/Owl_algodiff/S/Builder/module-type-Sito/index.html deleted file mode 100644 index 39a4e9215..000000000 --- a/owl/Owl_algodiff/S/Builder/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl.Owl_algodiff.S.Builder.Sito)

Module type Builder.Sito

val label : string
val ff_f : A.elt -> t * t * t
val ff_arr : A.arr -> t * t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl/Owl_algodiff/S/Linalg/index.html b/owl/Owl_algodiff/S/Linalg/index.html deleted file mode 100644 index 2aa84ffd8..000000000 --- a/owl/Owl_algodiff/S/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl.Owl_algodiff.S.Linalg)

Module S.Linalg

val inv : t -> t
val logdet : t -> t
val chol : ?upper:bool -> t -> t
val qr : t -> t * t
val lq : t -> t * t
val svd : ?thin:bool -> t -> t * t * t
val sylvester : t -> t -> t -> t
val lyapunov : t -> t -> t
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> t -> t -> t
val (/@) : t -> t -> t
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> t -> t -> t
val care : ?diag_r:bool -> t -> t -> t -> t -> t
val dare : ?diag_r:bool -> t -> t -> t -> t -> t
\ No newline at end of file diff --git a/owl/Owl_algodiff/S/Mat/index.html b/owl/Owl_algodiff/S/Mat/index.html deleted file mode 100644 index e64ce3fbd..000000000 --- a/owl/Owl_algodiff/S/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_algodiff.S.Mat)

Module S.Mat

val empty : int -> int -> t
val zeros : int -> int -> t
val eye : int -> t
val ones : int -> int -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int -> int -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int -> int -> t
val shape : t -> int * int
val numel : t -> int
val row_num : t -> int
val col_num : t -> int
val reset : t -> unit
val reshape : int -> int -> t -> t
val get : t -> int -> int -> t
val set : t -> int -> int -> t -> t
val row : t -> int -> t
val mean : t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
val map_by_row : (t -> t) -> t -> t
val of_arrays : A.elt array array -> t
val init_2d : int -> int -> (int -> int -> t) -> t
val print : t -> unit
\ No newline at end of file diff --git a/owl/Owl_algodiff/S/Maths/index.html b/owl/Owl_algodiff/S/Maths/index.html deleted file mode 100644 index 66281ea86..000000000 --- a/owl/Owl_algodiff/S/Maths/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Maths (owl.Owl_algodiff.S.Maths)

Module S.Maths

val (+) : t -> t -> t
val (-) : t -> t -> t
val (*) : t -> t -> t
val (/) : t -> t -> t
val (*@) : t -> t -> t
val (**) : t -> t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val kron : t -> t -> t
val dot : t -> t -> t
val pow : t -> t -> t
val atan2 : t -> t -> t
val min2 : t -> t -> t
val max2 : t -> t -> t
val cross_entropy : t -> t -> t
val inv : t -> t
val neg : t -> t
val abs : t -> t
val signum : t -> t
val floor : t -> t
val ceil : t -> t
val round : t -> t
val sqr : t -> t
val sqrt : t -> t
val log : t -> t
val log2 : t -> t
val log10 : t -> t
val exp : t -> t
val sin : t -> t
val cos : t -> t
val tan : t -> t
val sinh : t -> t
val cosh : t -> t
val tanh : t -> t
val asin : t -> t
val acos : t -> t
val atan : t -> t
val asinh : t -> t
val acosh : t -> t
val atanh : t -> t
val sum' : t -> t
val log_sum_exp' : t -> t
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> t -> t
val sum : ?axis:int -> ?keep_dims:bool -> t -> t
val sum_reduce : ?axis:int array -> t -> t
val mean : t -> t
val transpose : ?axis:int array -> t -> t
val swap : int -> int -> t -> t
val l1norm' : t -> t
val l2norm' : t -> t
val l2norm_sqr' : t -> t
val sigmoid : t -> t
val relu : t -> t
val dawsn : t -> t
val softplus : t -> t
val softsign : t -> t
val softmax : ?axis:int -> t -> t
val reshape : t -> int array -> t
val flatten : t -> t
val get_item : t -> int -> int -> t
val get_row : t -> int -> t
val concat : axis:int -> t -> t -> t
val split : axis:int -> int array -> t -> t array
val of_arrays : t array array -> t
val to_arrays : t -> t array array
val concatenate : axis:int -> t array -> t
val stack : axis:int -> t array -> t
val get_slice : int list list -> t -> t
val set_slice : int list list -> t -> t -> t
val get_fancy : Owl_types.index list -> t -> t
val set_fancy : Owl_types.index list -> t -> t -> t
val diag : ?k:int -> t -> t
val diagm : ?k:int -> t -> t
val trace : t -> t
val triu : ?k:int -> t -> t
val tril : ?k:int -> t -> t
\ No newline at end of file diff --git a/owl/Owl_algodiff/S/NN/index.html b/owl/Owl_algodiff/S/NN/index.html deleted file mode 100644 index c5616e75a..000000000 --- a/owl/Owl_algodiff/S/NN/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -NN (owl.Owl_algodiff.S.NN)

Module S.NN

val dropout : ?rate:float -> t -> t
val conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val dilated_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val dilated_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val dilated_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val transpose_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val transpose_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val transpose_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val max_pool1d : Owl_types.padding -> t -> int array -> int array -> t
val max_pool2d : Owl_types.padding -> t -> int array -> int array -> t
val max_pool3d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool1d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool2d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool3d : Owl_types.padding -> t -> int array -> int array -> t
val upsampling2d : t -> int array -> t
val pad : ?v:A.elt -> int list list -> t -> t
\ No newline at end of file diff --git a/owl/Owl_algodiff/S/index.html b/owl/Owl_algodiff/S/index.html deleted file mode 100644 index 80adbbaa3..000000000 --- a/owl/Owl_algodiff/S/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -S (owl.Owl_algodiff.S)

Module Owl_algodiff.S

module A : sig ... end
type t = Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.S).t =
  | F of A.elt
  | Arr of A.arr
  | DF of t * t * int
  | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
val tag : unit -> int
val primal : t -> t
val primal' : t -> t
val zero : t -> t
val reset_zero : t -> t
val tangent : t -> t
val adjref : t -> t Stdlib.ref
val adjval : t -> t
val shape : t -> int array
val is_float : t -> bool
val is_arr : t -> bool
val row_num : t -> int
val col_num : t -> int
val numel : t -> int
val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t
val clip_by_l2norm : A.elt -> t -> t
val copy_primal' : t -> t
val tile : t -> int array -> t
val repeat : t -> int array -> t
val pack_elt : A.elt -> t
val unpack_elt : t -> A.elt
val pack_flt : float -> t
val _f : float -> t
val unpack_flt : t -> float
val pack_arr : A.arr -> t
val unpack_arr : t -> A.arr
val deep_info : t -> string
val type_info : t -> string
val error_binop : string -> t -> t -> 'a
val error_uniop : string -> t -> 'a
val make_forward : t -> t -> int -> t
val make_reverse : t -> int -> t
val reverse_prop : t -> t -> unit
val diff : (t -> t) -> t -> t
val diff' : (t -> t) -> t -> t * t
val grad : (t -> t) -> t -> t
val grad' : (t -> t) -> t -> t * t
val jacobian : (t -> t) -> t -> t
val jacobian' : (t -> t) -> t -> t * t
val jacobianv : (t -> t) -> t -> t -> t
val jacobianv' : (t -> t) -> t -> t -> t * t
val jacobianTv : (t -> t) -> t -> t -> t
val jacobianTv' : (t -> t) -> t -> t -> t * t
val hessian : (t -> t) -> t -> t
val hessian' : (t -> t) -> t -> t * t
val hessianv : (t -> t) -> t -> t -> t
val hessianv' : (t -> t) -> t -> t -> t * t
val laplacian : (t -> t) -> t -> t
val laplacian' : (t -> t) -> t -> t * t
val gradhessian : (t -> t) -> t -> t * t
val gradhessian' : (t -> t) -> t -> t * t * t
val gradhessianv : (t -> t) -> t -> t -> t * t
val gradhessianv' : (t -> t) -> t -> t -> t * t * t
module Builder : sig ... end
module Maths : sig ... end
module Linalg : sig ... end
module NN : sig ... end
module Mat : sig ... end
module Arr : sig ... end
val to_trace : t list -> string
val to_dot : t list -> string
val pp_num : Stdlib.Format.formatter -> t -> unit
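The S instance mirrors D at single precision. Rather than repeat the gradient example, here is a sketch of a forward-mode Jacobian-vector product, again assuming row-vector inputs:

open Owl_algodiff.S

let w = Mat.gaussian 3 2
let f x = Maths.(tanh (x *@ w))    (* maps a 1x3 row vector to a 1x2 row vector *)
let x0 = Mat.uniform 1 3
let v = Mat.ones 1 3
let jv = jacobianv f x0 v          (* directional derivative of f at x0 along v *)
let () = Mat.print jv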
\ No newline at end of file diff --git a/owl/Owl_algodiff_primal_ops/.dummy b/owl/Owl_algodiff_primal_ops/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_algodiff_primal_ops/D/Linalg/index.html b/owl/Owl_algodiff_primal_ops/D/Linalg/index.html deleted file mode 100644 index 93a1b1ae0..000000000 --- a/owl/Owl_algodiff_primal_ops/D/Linalg/index.html +++ /dev/null @@ -1,25 +0,0 @@ - -Linalg (owl.Owl_algodiff_primal_ops.D.Linalg)

Module D.Linalg

include module type of struct include Owl_linalg.D end
include module type of struct include Owl_linalg_d end
type elt = float
type complex_mat = Owl_dense_matrix_z.mat
type int32_mat = (int32, Stdlib.Bigarray.int32_elt) Owl_dense_matrix_generic.t
include Owl_linalg_intf.Common with type elt := elt and type mat := mat and type complex_mat := complex_mat and type int32_mat := int32_mat
include Owl_base_linalg_intf.Common with type elt := elt with type mat := mat with type complex_mat := complex_mat with type int32_mat := int32_mat
Basic functions
val inv : mat -> mat
val det : mat -> elt
val logdet : mat -> elt
val is_triu : mat -> bool
val is_tril : mat -> bool
val is_symmetric : mat -> bool
val is_diag : mat -> bool
Factorisation
val svd : ?thin:bool -> mat -> mat * mat * mat
val chol : ?upper:bool -> mat -> mat
Linear system of equations
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> mat -> mat -> mat
val sylvester : mat -> mat -> mat -> mat
val lyapunov : mat -> mat -> mat
val discrete_lyapunov : ?solver:[ `default | `direct | `bilinear ] -> mat -> mat -> mat
Basic functions
val pinv : ?tol:float -> mat -> mat
val rank : ?tol:float -> mat -> int
val norm : ?p:float -> mat -> float
val vecnorm : ?p:float -> mat -> float
val cond : ?p:float -> mat -> float
val rcond : mat -> float
val is_posdef : mat -> bool
Factorisation
val lu : mat -> mat * mat * int32_mat
val svdvals : mat -> mat
val gsvd : mat -> mat -> mat * mat * mat * mat * mat * mat
val gsvdvals : mat -> mat -> mat
val schur : mat -> mat * mat * complex_mat
val schur_tz : mat -> mat * mat
val ordschur : select:int32_mat -> mat -> mat -> mat * mat * complex_mat
val qz : mat -> mat -> mat * mat * mat * mat * complex_mat
val ordqz : select:int32_mat -> mat -> mat -> mat -> mat -> mat * mat * mat * mat * complex_mat
val qzvals : mat -> mat -> complex_mat
val hess : mat -> mat * mat
Eigenvalues & eigenvectors
val eig : ?permute:bool -> ?scale:bool -> mat -> complex_mat * complex_mat
val eigvals : ?permute:bool -> ?scale:bool -> mat -> complex_mat
Linear system of equations
val null : mat -> mat
val triangular_solve : upper:bool -> ?trans:bool -> mat -> mat -> mat
val linreg : mat -> mat -> elt * elt
Low-level factorisation functions
val lufact : mat -> mat * int32_mat
val qrfact : ?pivot:bool -> mat -> mat * mat * int32_mat
val bkfact : ?upper:bool -> ?symmetric:bool -> ?rook:bool -> mat -> mat * int32_mat
Matrix functions
val mpow : mat -> float -> mat
val expm : mat -> mat
val sinm : mat -> mat
val cosm : mat -> mat
val tanm : mat -> mat
val sincosm : mat -> mat * mat
val sinhm : mat -> mat
val coshm : mat -> mat
val tanhm : mat -> mat
val sinhcoshm : mat -> mat * mat
Helper functions
val select_ev : [ `LHP | `RHP | `UDI | `UDO ] -> mat -> int32_mat
val peakflops : ?n:int -> unit -> float
include Owl_linalg_intf.Real with type mat := mat and type elt := elt
include Owl_base_linalg_intf.Real with type mat := mat with type elt := elt
val care : ?diag_r:bool -> mat -> mat -> mat -> mat -> mat
val dare : ?diag_r:bool -> mat -> mat -> mat -> mat -> mat
val qr : mat -> mat * mat
val lq : mat -> mat * mat
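A small sketch of the plain double-precision solver above, built with the ndarray constructors of the enclosing Owl_algodiff_primal_ops.D module (listed later in this diff); it assumes mat and arr denote the same underlying Bigarray type, as they do in Owl's dense modules:

module P = Owl_algodiff_primal_ops.D

let a = P.uniform [| 4; 4 |]          (* random square matrix, almost surely nonsingular *)
let b = P.uniform [| 4; 1 |]
let x = P.Linalg.linsolve a b         (* solve a x = b *)
let r = P.sub (P.dot a x) b
let () = Printf.printf "residual l2 norm = %g\n" (P.l2norm' r)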
\ No newline at end of file diff --git a/owl/Owl_algodiff_primal_ops/D/Mat/index.html b/owl/Owl_algodiff_primal_ops/D/Mat/index.html deleted file mode 100644 index 4caed2252..000000000 --- a/owl/Owl_algodiff_primal_ops/D/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_algodiff_primal_ops.D.Mat)

Module D.Mat

\ No newline at end of file diff --git a/owl/Owl_algodiff_primal_ops/D/index.html b/owl/Owl_algodiff_primal_ops/D/index.html deleted file mode 100644 index a5a72b74e..000000000 --- a/owl/Owl_algodiff_primal_ops/D/index.html +++ /dev/null @@ -1,579 +0,0 @@ - -D (owl.Owl_algodiff_primal_ops.D)

Module Owl_algodiff_primal_ops.D

include module type of struct include Owl_dense_ndarray.D end
include module type of struct include Owl_dense_ndarray_d end
type elt = float
type arr = (float, Stdlib.Bigarray.float64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
include Owl_dense_ndarray_intf.Common with type elt := elt and type arr := arr
include Owl_base_dense_ndarray_intf.Common with type elt := elt with type arr := arr
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:float -> int array -> arr
val shape : arr -> int array
val numel : arr -> int
val strides : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val slice_size : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val flatten : arr -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val squeeze : ?axis:int array -> arr -> arr
val expand : ?hi:bool -> arr -> int -> arr
val split : ?axis:int -> int array -> arr -> arr array
val draw : ?axis:int -> arr -> int -> arr * int array
val pad : ?v:elt -> int list list -> arr -> arr
val one_hot : int -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
Iterate array elements
val iteri : (int -> elt -> unit) -> arr -> unit
val iter : (elt -> unit) -> arr -> unit
val mapi : (int -> elt -> elt) -> arr -> arr
val map : (elt -> elt) -> arr -> arr
val filteri : (int -> elt -> bool) -> arr -> int array
val filter : (elt -> bool) -> arr -> int array
val foldi : ?axis:int -> (int -> elt -> elt -> elt) -> elt -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scani : ?axis:int -> (int -> elt -> elt -> elt) -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
Examination & Comparison
val exists : (elt -> bool) -> arr -> bool
val not_exists : (elt -> bool) -> arr -> bool
val for_all : (elt -> bool) -> arr -> bool
val is_zero : arr -> bool
val is_positive : arr -> bool
val is_negative : arr -> bool
val is_nonpositive : arr -> bool
val is_nonnegative : arr -> bool
val is_normal : arr -> bool
val not_nan : arr -> bool
val not_inf : arr -> bool
val equal : arr -> arr -> bool
val not_equal : arr -> arr -> bool
val greater : arr -> arr -> bool
val less : arr -> arr -> bool
val greater_equal : arr -> arr -> bool
val less_equal : arr -> arr -> bool
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val equal_scalar : arr -> elt -> bool
val not_equal_scalar : arr -> elt -> bool
val less_scalar : arr -> elt -> bool
val greater_scalar : arr -> elt -> bool
val less_equal_scalar : arr -> elt -> bool
val greater_equal_scalar : arr -> elt -> bool
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val diag : ?k:int -> arr -> arr
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
Create N-dimensional array
val linspace : elt -> elt -> int -> arr

linspace a b n generates an array of n values evenly spaced between a and b, e.g. linspace 0. 9. 10.

val logspace : ?base:float -> elt -> elt -> int -> arr

logspace ?base a b n generates n values evenly spaced on a logarithmic scale, with exponents running from a to b, e.g. logspace 0. 9. 10.

val unit_basis : int -> int -> arr

unit_basis n i returns a unit basis vector of length n with the ith element set to 1.

Obtain basic properties
val num_dims : arr -> int
val nth_dim : arr -> int -> int
val nnz : arr -> int
val density : arr -> float
val size_in_bytes : arr -> int
val same_shape : arr -> arr -> bool
val same_data : arr -> arr -> bool
val ind : arr -> int -> int array
val i1d : arr -> int array -> int
Manipulate a N-dimensional array
val get_index : arr -> int array array -> elt array
val set_index : arr -> int array array -> elt array -> unit
val get_fancy : Owl_types.index list -> arr -> arr
val set_fancy : Owl_types.index list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val sub_ndarray : int array -> arr -> arr array
val slice_left : arr -> int array -> arr
val fill : arr -> elt -> unit
val resize : ?head:bool -> arr -> int array -> arr
val flip : ?axis:int -> arr -> arr
val rotate : arr -> int -> arr
val swap : int -> int -> arr -> arr
val concat_vertical : arr -> arr -> arr
val concat_horizontal : arr -> arr -> arr
val concat_vh : arr array array -> arr
val split_vh : (int * int) array array -> arr -> arr array array
val dropout : ?rate:float -> arr -> arr
val top : arr -> int -> int array array
val bottom : arr -> int -> int array array
val sort : arr -> arr
val sort1 : ?axis:int -> arr -> arr
val argsort : arr -> (int64, Stdlib.Bigarray.int64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
val mmap : Unix.file_descr -> ?pos:int64 -> bool -> int array -> arr
Iterate array elements
val iter2i : (int -> elt -> elt -> unit) -> arr -> arr -> unit
val iter2 : (elt -> elt -> unit) -> arr -> arr -> unit
val map2i : (int -> elt -> elt -> elt) -> arr -> arr -> arr
val map2 : (elt -> elt -> elt) -> arr -> arr -> arr
val iteri_nd : (int array -> elt -> unit) -> arr -> unit
val mapi_nd : (int array -> elt -> elt) -> arr -> arr
val foldi_nd : ?axis:int -> (int array -> elt -> elt -> elt) -> elt -> arr -> arr
val scani_nd : ?axis:int -> (int array -> elt -> elt -> elt) -> arr -> arr
val filteri_nd : (int array -> elt -> bool) -> arr -> int array array
val iter2i_nd : (int array -> elt -> elt -> unit) -> arr -> arr -> unit
val map2i_nd : (int array -> elt -> elt -> elt) -> arr -> arr -> arr
val iteri_slice : ?axis:int -> (int -> arr -> unit) -> arr -> unit
val iter_slice : ?axis:int -> (arr -> unit) -> arr -> unit
val mapi_slice : ?axis:int -> (int -> arr -> 'c) -> arr -> 'c array
val map_slice : ?axis:int -> (arr -> 'c) -> arr -> 'c array
val filteri_slice : ?axis:int -> (int -> arr -> bool) -> arr -> arr array
val filter_slice : ?axis:int -> (arr -> bool) -> arr -> arr array
val foldi_slice : ?axis:int -> (int -> 'c -> arr -> 'c) -> 'c -> arr -> 'c
val fold_slice : ?axis:int -> ('c -> arr -> 'c) -> 'c -> arr -> 'c
Examine array elements or compare two arrays
Input/Output functions
val to_array : arr -> elt array
val save : out:string -> arr -> unit
val load : string -> arr
val save_npy : out:string -> arr -> unit
val load_npy : string -> arr
Unary mathematical operations
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod' : arr -> elt
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean' : arr -> elt
val median' : arr -> elt
val median : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var' : arr -> elt
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std' : arr -> elt
val sem : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sem' : arr -> elt
val minmax : ?axis:int -> ?keep_dims:bool -> arr -> arr * arr
val minmax' : arr -> elt * elt
val min_i : arr -> elt * int array
val max_i : arr -> elt * int array
val minmax_i : arr -> (elt * int array) * (elt * int array)
val abs2 : arr -> arr
val conj : arr -> arr
val reci : arr -> arr
val reci_tol : ?tol:elt -> arr -> arr
val cbrt : arr -> arr
val exp2 : arr -> arr
val exp10 : arr -> arr
val expm1 : arr -> arr
val log1p : arr -> arr
val trunc : arr -> arr
val fix : arr -> arr
val modf : arr -> arr * arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm_sqr : ?axis:int -> ?keep_dims:bool -> arr -> arr
val vecnorm : ?axis:int -> ?p:float -> ?keep_dims:bool -> arr -> arr
val vecnorm' : ?p:float -> arr -> elt
val cumsum : ?axis:int -> arr -> arr
val cumprod : ?axis:int -> arr -> arr
val cummin : ?axis:int -> arr -> arr
val cummax : ?axis:int -> arr -> arr
val diff : ?axis:int -> ?n:int -> arr -> arr
val lgamma : arr -> arr
Binary mathematical operations
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val ssqr' : arr -> elt -> elt
val ssqr_diff' : arr -> arr -> elt
Tensor Calculus
val contract1 : (int * int) array -> arr -> arr
val contract2 : (int * int) array -> arr -> arr -> arr
Experimental functions
val slide : ?axis:int -> ?ofs:int -> ?step:int -> window:int -> arr -> arr
Functions of in-place modification
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:float -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val sort_ : arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_fancy_ : out:arr -> Owl_types.index list -> arr -> unit
val set_fancy_ : out:arr -> Owl_types.index list -> arr -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit

Matrix functions

val col : arr -> int -> arr
val cols : arr -> int array -> arr
val to_arrays : arr -> elt array array
val draw_rows : ?replacement:bool -> arr -> int -> arr * int array
val draw_cols : ?replacement:bool -> arr -> int -> arr * int array
val draw_rows2 : ?replacement:bool -> arr -> arr -> int -> arr * arr * int array
val draw_cols2 : ?replacement:bool -> arr -> arr -> int -> arr * arr * int array
include Owl_dense_ndarray_intf.Real with type elt := elt and type arr := arr
include Owl_base_dense_ndarray_intf.Real with type elt := elt with type arr := arr
val sum_slices : ?axis:int -> arr -> arr
val signum : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val atan2 : arr -> arr -> arr
val approx_equal : ?eps:float -> arr -> arr -> bool
val approx_equal_scalar : ?eps:float -> arr -> float -> bool
val approx_elt_equal : ?eps:float -> arr -> arr -> arr
val approx_elt_equal_scalar : ?eps:float -> arr -> float -> arr
val dot : arr -> arr -> arr
val trace : arr -> elt
Helper functions
val float_to_elt : float -> elt
val elt_to_float : elt -> float
Real operations
val i0 : arr -> arr
val i0e : arr -> arr
val i1 : arr -> arr
val i1e : arr -> arr
val iv : v:arr -> arr -> arr
val scalar_iv : v:elt -> arr -> arr
val iv_scalar : v:arr -> elt -> arr
val j0 : arr -> arr
val j1 : arr -> arr
val jv : v:arr -> arr -> arr
val scalar_jv : v:elt -> arr -> arr
val jv_scalar : v:arr -> elt -> arr
val erf : arr -> arr
val erfc : arr -> arr
val logistic : arr -> arr
val elu : ?alpha:elt -> arr -> arr
val leaky_relu : ?alpha:elt -> arr -> arr
val softplus : arr -> arr
val softsign : arr -> arr
val softmax : ?axis:int -> arr -> arr
val sigmoid : arr -> arr
val log_sum_exp' : arr -> float
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val fmod_scalar : arr -> elt -> arr
val scalar_fmod : elt -> arr -> arr
val cross_entropy' : arr -> arr -> float
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
val poisson : mu:elt -> int array -> arr
val poisson_ : mu:elt -> out:arr -> unit
include Owl_dense_ndarray_intf.NN with type arr := arr
include Owl_base_dense_ndarray_intf.NN with type arr := arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val max_pool2d_argmax : ?padding:Owl_types.padding -> arr -> int array -> int array -> arr * (int64, Stdlib.Bigarray.int64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
val conv1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
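A small, hedged sketch of conv2d followed by max_pool2d; it assumes Owl's usual NHWC layout ([|batch; height; width; channels|]) and kernels of shape [|h; w; in_channels; out_channels|], and the sizes are arbitrary:

  (* convolve one 28x28 single-channel image with eight 3x3 filters, then pool *)
  let () =
    let open Owl_algodiff_primal_ops.D in
    let x = uniform [|1; 28; 28; 1|] in
    let k = gaussian [|3; 3; 1; 8|] in
    let y = conv2d ~padding:Owl_types.SAME x k [|1; 1|] in
    let z = max_pool2d ~padding:Owl_types.VALID y [|2; 2|] [|2; 2|] in
    Array.iter (Printf.printf "%d ") (shape z); print_newline ()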
include Owl_dense_ndarray_intf.Distribution with type arr := arr
Stats & distribution functions
val uniform_rvs : a:arr -> b:arr -> n:int -> arr
val uniform_pdf : a:arr -> b:arr -> arr -> arr
val uniform_logpdf : a:arr -> b:arr -> arr -> arr
val uniform_cdf : a:arr -> b:arr -> arr -> arr
val uniform_logcdf : a:arr -> b:arr -> arr -> arr
val uniform_ppf : a:arr -> b:arr -> arr -> arr
val uniform_sf : a:arr -> b:arr -> arr -> arr
val uniform_logsf : a:arr -> b:arr -> arr -> arr
val uniform_isf : a:arr -> b:arr -> arr -> arr
val gaussian_rvs : mu:arr -> sigma:arr -> n:int -> arr
val gaussian_pdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_logpdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_cdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_logcdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_ppf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_sf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_logsf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_isf : mu:arr -> sigma:arr -> arr -> arr
val exponential_rvs : lambda:arr -> n:int -> arr
val exponential_pdf : lambda:arr -> arr -> arr
val exponential_logpdf : lambda:arr -> arr -> arr
val exponential_cdf : lambda:arr -> arr -> arr
val exponential_logcdf : lambda:arr -> arr -> arr
val exponential_ppf : lambda:arr -> arr -> arr
val exponential_sf : lambda:arr -> arr -> arr
val exponential_logsf : lambda:arr -> arr -> arr
val exponential_isf : lambda:arr -> arr -> arr
val gamma_rvs : shape:arr -> scale:arr -> n:int -> arr
val gamma_pdf : shape:arr -> scale:arr -> arr -> arr
val gamma_logpdf : shape:arr -> scale:arr -> arr -> arr
val gamma_cdf : shape:arr -> scale:arr -> arr -> arr
val gamma_logcdf : shape:arr -> scale:arr -> arr -> arr
val gamma_ppf : shape:arr -> scale:arr -> arr -> arr
val gamma_sf : shape:arr -> scale:arr -> arr -> arr
val gamma_logsf : shape:arr -> scale:arr -> arr -> arr
val gamma_isf : shape:arr -> scale:arr -> arr -> arr
val beta_rvs : a:arr -> b:arr -> n:int -> arr
val beta_pdf : a:arr -> b:arr -> arr -> arr
val beta_logpdf : a:arr -> b:arr -> arr -> arr
val beta_cdf : a:arr -> b:arr -> arr -> arr
val beta_logcdf : a:arr -> b:arr -> arr -> arr
val beta_ppf : a:arr -> b:arr -> arr -> arr
val beta_sf : a:arr -> b:arr -> arr -> arr
val beta_logsf : a:arr -> b:arr -> arr -> arr
val beta_isf : a:arr -> b:arr -> arr -> arr
val chi2_rvs : df:arr -> n:int -> arr
val chi2_pdf : df:arr -> arr -> arr
val chi2_logpdf : df:arr -> arr -> arr
val chi2_cdf : df:arr -> arr -> arr
val chi2_logcdf : df:arr -> arr -> arr
val chi2_ppf : df:arr -> arr -> arr
val chi2_sf : df:arr -> arr -> arr
val chi2_logsf : df:arr -> arr -> arr
val chi2_isf : df:arr -> arr -> arr
val f_rvs : dfnum:arr -> dfden:arr -> n:int -> arr
val f_pdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_logpdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_cdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_logcdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_ppf : dfnum:arr -> dfden:arr -> arr -> arr
val f_sf : dfnum:arr -> dfden:arr -> arr -> arr
val f_logsf : dfnum:arr -> dfden:arr -> arr -> arr
val f_isf : dfnum:arr -> dfden:arr -> arr -> arr
val cauchy_rvs : loc:arr -> scale:arr -> n:int -> arr
val cauchy_pdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_logpdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_cdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_logcdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_ppf : loc:arr -> scale:arr -> arr -> arr
val cauchy_sf : loc:arr -> scale:arr -> arr -> arr
val cauchy_logsf : loc:arr -> scale:arr -> arr -> arr
val cauchy_isf : loc:arr -> scale:arr -> arr -> arr
val lomax_rvs : shape:arr -> scale:arr -> n:int -> arr
val lomax_pdf : shape:arr -> scale:arr -> arr -> arr
val lomax_logpdf : shape:arr -> scale:arr -> arr -> arr
val lomax_cdf : shape:arr -> scale:arr -> arr -> arr
val lomax_logcdf : shape:arr -> scale:arr -> arr -> arr
val lomax_ppf : shape:arr -> scale:arr -> arr -> arr
val lomax_sf : shape:arr -> scale:arr -> arr -> arr
val lomax_logsf : shape:arr -> scale:arr -> arr -> arr
val lomax_isf : shape:arr -> scale:arr -> arr -> arr
val weibull_rvs : shape:arr -> scale:arr -> n:int -> arr
val weibull_pdf : shape:arr -> scale:arr -> arr -> arr
val weibull_logpdf : shape:arr -> scale:arr -> arr -> arr
val weibull_cdf : shape:arr -> scale:arr -> arr -> arr
val weibull_logcdf : shape:arr -> scale:arr -> arr -> arr
val weibull_ppf : shape:arr -> scale:arr -> arr -> arr
val weibull_sf : shape:arr -> scale:arr -> arr -> arr
val weibull_logsf : shape:arr -> scale:arr -> arr -> arr
val weibull_isf : shape:arr -> scale:arr -> arr -> arr
val laplace_rvs : loc:arr -> scale:arr -> n:int -> arr
val laplace_pdf : loc:arr -> scale:arr -> arr -> arr
val laplace_logpdf : loc:arr -> scale:arr -> arr -> arr
val laplace_cdf : loc:arr -> scale:arr -> arr -> arr
val laplace_logcdf : loc:arr -> scale:arr -> arr -> arr
val laplace_ppf : loc:arr -> scale:arr -> arr -> arr
val laplace_sf : loc:arr -> scale:arr -> arr -> arr
val laplace_logsf : loc:arr -> scale:arr -> arr -> arr
val laplace_isf : loc:arr -> scale:arr -> arr -> arr
val gumbel1_rvs : a:arr -> b:arr -> n:int -> arr
val gumbel1_pdf : a:arr -> b:arr -> arr -> arr
val gumbel1_logpdf : a:arr -> b:arr -> arr -> arr
val gumbel1_cdf : a:arr -> b:arr -> arr -> arr
val gumbel1_logcdf : a:arr -> b:arr -> arr -> arr
val gumbel1_ppf : a:arr -> b:arr -> arr -> arr
val gumbel1_sf : a:arr -> b:arr -> arr -> arr
val gumbel1_logsf : a:arr -> b:arr -> arr -> arr
val gumbel1_isf : a:arr -> b:arr -> arr -> arr
val gumbel2_rvs : a:arr -> b:arr -> n:int -> arr
val gumbel2_pdf : a:arr -> b:arr -> arr -> arr
val gumbel2_logpdf : a:arr -> b:arr -> arr -> arr
val gumbel2_cdf : a:arr -> b:arr -> arr -> arr
val gumbel2_logcdf : a:arr -> b:arr -> arr -> arr
val gumbel2_ppf : a:arr -> b:arr -> arr -> arr
val gumbel2_sf : a:arr -> b:arr -> arr -> arr
val gumbel2_logsf : a:arr -> b:arr -> arr -> arr
val gumbel2_isf : a:arr -> b:arr -> arr -> arr
val logistic_rvs : loc:arr -> scale:arr -> n:int -> arr
val logistic_pdf : loc:arr -> scale:arr -> arr -> arr
val logistic_logpdf : loc:arr -> scale:arr -> arr -> arr
val logistic_cdf : loc:arr -> scale:arr -> arr -> arr
val logistic_logcdf : loc:arr -> scale:arr -> arr -> arr
val logistic_ppf : loc:arr -> scale:arr -> arr -> arr
val logistic_sf : loc:arr -> scale:arr -> arr -> arr
val logistic_logsf : loc:arr -> scale:arr -> arr -> arr
val logistic_isf : loc:arr -> scale:arr -> arr -> arr
val lognormal_rvs : mu:arr -> sigma:arr -> n:int -> arr
val lognormal_pdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_logpdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_cdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_logcdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_ppf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_sf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_logsf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_isf : mu:arr -> sigma:arr -> arr -> arr
val rayleigh_rvs : sigma:arr -> n:int -> arr
val rayleigh_pdf : sigma:arr -> arr -> arr
val rayleigh_logpdf : sigma:arr -> arr -> arr
val rayleigh_cdf : sigma:arr -> arr -> arr
val rayleigh_logcdf : sigma:arr -> arr -> arr
val rayleigh_ppf : sigma:arr -> arr -> arr
val rayleigh_sf : sigma:arr -> arr -> arr
val rayleigh_logsf : sigma:arr -> arr -> arr
val rayleigh_isf : sigma:arr -> arr -> arr
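The distribution functions above are vectorised: parameters are ndarrays, so a batch of distributions can be sampled and evaluated at once. A hedged sketch (the parameter shapes are illustrative and the exact output shape follows the library's broadcasting rules):

  (* sample from three standard normals and evaluate their densities *)
  let () =
    let open Owl_algodiff_primal_ops.D in
    let mu = zeros [|3|] and sigma = ones [|3|] in
    let samples = gaussian_rvs ~mu ~sigma ~n:3 in
    let density = gaussian_pdf ~mu ~sigma samples in
    Printf.printf "mean pdf value = %g\n" (mean' density)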
include module type of struct include Owl_dense_ndarray.Operator end
include sig ... end
val (+$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (-$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (*$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (/$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($+) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($-) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($*) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($/) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val (=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (!=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (<>) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (>) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (<) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (>=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (<=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
include sig ... end
val (=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (!=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (<>$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (<$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (>$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (<=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (>=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (!=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (<>.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (<.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (>.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (<=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (>=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (=~) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (=~$) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (=~.) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val (=~.$) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (%) : (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t
val (%$) : (float, 'a) Owl_dense_ndarray_generic.t -> float -> (float, 'a) Owl_dense_ndarray_generic.t
val (**) : (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t
val ($**) : float -> (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t
val (**$) : (float, 'a) Owl_dense_ndarray_generic.t -> float -> (float, 'a) Owl_dense_ndarray_generic.t
val (+=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (-=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (*=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (/=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (+$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (-$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (*$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (/$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (.!{;..}) : ('a, 'b) Owl_dense_ndarray_generic.t -> Owl_types.index array -> ('a, 'b) Owl_dense_ndarray_generic.t
val (.!{;..}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> Owl_types.index array -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (.${}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list -> ('a, 'b) Owl_dense_ndarray_generic.t
val (.${;..}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list array -> ('a, 'b) Owl_dense_ndarray_generic.t
val (.${}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (.${;..}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list array -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
include sig ... end
val (.%{}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int -> 'a
val (.%{;..}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int array -> 'a
val (.%{}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int -> 'a -> unit
val (.%{;..}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int array -> 'a -> unit
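These operators come from Owl_dense_ndarray.Operator, so they apply directly to this module's arr values. A hedged sketch of the scalar and in-place forms:

  (* scalar arithmetic, an in-place update, and an approximate comparison *)
  let () =
    let open Owl_algodiff_primal_ops.D in
    let a = ones [|2; 3|] in
    let b = a +$ 2. in            (* add 2. to every element *)
    let c = b *$ 0.5 in           (* scale elementwise *)
    a += c;                       (* accumulate into a, in place *)
    Printf.printf "a ~= c: %b\n" (a =~ c)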
val mpow : Owl_linalg_d.mat -> float -> Owl_linalg_d.mat
module Scalar = Owl_maths
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl/Owl_algodiff_primal_ops/S/Linalg/index.html b/owl/Owl_algodiff_primal_ops/S/Linalg/index.html deleted file mode 100644 index 0de92b6ad..000000000 --- a/owl/Owl_algodiff_primal_ops/S/Linalg/index.html +++ /dev/null @@ -1,25 +0,0 @@ - -Linalg (owl.Owl_algodiff_primal_ops.S.Linalg)

Module S.Linalg

include module type of struct include Owl_linalg.S end
include module type of struct include Owl_linalg_s end
type elt = float
type complex_mat = Owl_dense_matrix_c.mat
type int32_mat = (int32, Stdlib.Bigarray.int32_elt) Owl_dense_matrix_generic.t
include Owl_linalg_intf.Common with type elt := elt and type mat := mat and type complex_mat := complex_mat and type int32_mat := int32_mat
include Owl_base_linalg_intf.Common with type elt := elt with type mat := mat with type complex_mat := complex_mat with type int32_mat := int32_mat
Basic functions
val inv : mat -> mat
val det : mat -> elt
val logdet : mat -> elt
val is_triu : mat -> bool
val is_tril : mat -> bool
val is_symmetric : mat -> bool
val is_diag : mat -> bool
Factorisation
val svd : ?thin:bool -> mat -> mat * mat * mat
val chol : ?upper:bool -> mat -> mat
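A hedged sketch of svd; it assumes, as in Owl, that this mat type and the enclosing module's arr share the same underlying ndarray representation, so a random S array can be factorised directly:

  (* thin SVD of a random 4x3 matrix *)
  let () =
    let module M = Owl_algodiff_primal_ops.S in
    let module L = Owl_algodiff_primal_ops.S.Linalg in
    let a = M.uniform [|4; 3|] in
    let u, s, vt = L.svd ~thin:true a in
    Printf.printf "u: %dx%d  singular values: %d  vt: %dx%d\n"
      (M.row_num u) (M.col_num u) (M.numel s) (M.row_num vt) (M.col_num vt)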
Linear system of equations
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> mat -> mat -> mat
val sylvester : mat -> mat -> mat -> mat
val lyapunov : mat -> mat -> mat
val discrete_lyapunov : ?solver:[ `default | `direct | `bilinear ] -> mat -> mat -> mat
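For linsolve, a hedged example that solves a small random system and checks the residual (same mat/arr compatibility assumption as above):

  (* solve A x = b for a random 3x3 A and report ||A x - b|| *)
  let () =
    let module M = Owl_algodiff_primal_ops.S in
    let module L = Owl_algodiff_primal_ops.S.Linalg in
    let a = M.uniform [|3; 3|] and b = M.uniform [|3; 1|] in
    let x = L.linsolve a b in
    Printf.printf "residual = %g\n" M.(l2norm' (sub (dot a x) b))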
Basic functions
val pinv : ?tol:float -> mat -> mat
val rank : ?tol:float -> mat -> int
val norm : ?p:float -> mat -> float
val vecnorm : ?p:float -> mat -> float
val cond : ?p:float -> mat -> float
val rcond : mat -> float
val is_posdef : mat -> bool
Factorisation
val lu : mat -> mat * mat * int32_mat
val svdvals : mat -> mat
val gsvd : mat -> mat -> mat * mat * mat * mat * mat * mat
val gsvdvals : mat -> mat -> mat
val schur : mat -> mat * mat * complex_mat
val schur_tz : mat -> mat * mat
val ordschur : select:int32_mat -> mat -> mat -> mat * mat * complex_mat
val qz : mat -> mat -> mat * mat * mat * mat * complex_mat
val ordqz : select:int32_mat -> mat -> mat -> mat -> mat -> mat * mat * mat * mat * complex_mat
val qzvals : mat -> mat -> complex_mat
val hess : mat -> mat * mat
Eigenvalues & eigenvectors
val eig : ?permute:bool -> ?scale:bool -> mat -> complex_mat * complex_mat
val eigvals : ?permute:bool -> ?scale:bool -> mat -> complex_mat
Linear system of equations
val null : mat -> mat
val triangular_solve : upper:bool -> ?trans:bool -> mat -> mat -> mat
val linreg : mat -> mat -> elt * elt
Low-level factorisation functions
val lufact : mat -> mat * int32_mat
val qrfact : ?pivot:bool -> mat -> mat * mat * int32_mat
val bkfact : ?upper:bool -> ?symmetric:bool -> ?rook:bool -> mat -> mat * int32_mat
Matrix functions
val mpow : mat -> float -> mat
val expm : mat -> mat
val sinm : mat -> mat
val cosm : mat -> mat
val tanm : mat -> mat
val sincosm : mat -> mat * mat
val sinhm : mat -> mat
val coshm : mat -> mat
val tanhm : mat -> mat
val sinhcoshm : mat -> mat * mat
Helper functions
val select_ev : [ `LHP | `RHP | `UDI | `UDO ] -> mat -> int32_mat
val peakflops : ?n:int -> unit -> float
include Owl_linalg_intf.Real with type mat := mat and type elt := elt
include Owl_base_linalg_intf.Real with type mat := mat with type elt := elt
val care : ?diag_r:bool -> mat -> mat -> mat -> mat -> mat
val dare : ?diag_r:bool -> mat -> mat -> mat -> mat -> mat
val qr : mat -> mat * mat
val lq : mat -> mat * mat
\ No newline at end of file diff --git a/owl/Owl_algodiff_primal_ops/S/Mat/index.html b/owl/Owl_algodiff_primal_ops/S/Mat/index.html deleted file mode 100644 index eed6376dc..000000000 --- a/owl/Owl_algodiff_primal_ops/S/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_algodiff_primal_ops.S.Mat)

Module S.Mat

\ No newline at end of file diff --git a/owl/Owl_algodiff_primal_ops/S/index.html b/owl/Owl_algodiff_primal_ops/S/index.html deleted file mode 100644 index 7109a2d7c..000000000 --- a/owl/Owl_algodiff_primal_ops/S/index.html +++ /dev/null @@ -1,579 +0,0 @@ - -S (owl.Owl_algodiff_primal_ops.S)

Module Owl_algodiff_primal_ops.S

include module type of struct include Owl_dense_ndarray.S end
include module type of struct include Owl_dense_ndarray_s end
type elt = float
type arr = (float, Stdlib.Bigarray.float32_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
include Owl_dense_ndarray_intf.Common with type elt := elt and type arr := arr
include Owl_base_dense_ndarray_intf.Common with type elt := elt with type arr := arr
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:float -> int array -> arr
val shape : arr -> int array
val numel : arr -> int
val strides : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val slice_size : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val flatten : arr -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val squeeze : ?axis:int array -> arr -> arr
val expand : ?hi:bool -> arr -> int -> arr
val split : ?axis:int -> int array -> arr -> arr array
val draw : ?axis:int -> arr -> int -> arr * int array
val pad : ?v:elt -> int list list -> arr -> arr
val one_hot : int -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
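A minimal sketch of the creation, indexing and slicing functions above (values are arbitrary):

  (* build a 3x4 array, poke one element, then slice out the first row *)
  let () =
    let open Owl_algodiff_primal_ops.S in
    let x = sequential ~a:0. ~step:1. [|3; 4|] in   (* 0., 1., ..., 11. *)
    set x [|0; 0|] 42.;
    let row0 = get_slice [ [0]; [] ] x in           (* row 0, all columns *)
    print (reshape row0 [|4|])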
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
Iterate array elements
val iteri : (int -> elt -> unit) -> arr -> unit
val iter : (elt -> unit) -> arr -> unit
val mapi : (int -> elt -> elt) -> arr -> arr
val map : (elt -> elt) -> arr -> arr
val filteri : (int -> elt -> bool) -> arr -> int array
val filter : (elt -> bool) -> arr -> int array
val foldi : ?axis:int -> (int -> elt -> elt -> elt) -> elt -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scani : ?axis:int -> (int -> elt -> elt -> elt) -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
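A hedged example of map and fold from the iteration functions above; note that fold returns an arr rather than a bare elt:

  (* square each element, then fold the squares into a running sum *)
  let () =
    let open Owl_algodiff_primal_ops.S in
    let x = sequential ~a:1. [|4|] in                    (* 1., 2., 3., 4. *)
    let sq = map (fun v -> v *. v) x in
    let total = fold (fun acc v -> acc +. v) 0. sq in    (* an arr, per the signature *)
    Printf.printf "sum of squares = %g\n" (sum' total)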
Examination & Comparison
val exists : (elt -> bool) -> arr -> bool
val not_exists : (elt -> bool) -> arr -> bool
val for_all : (elt -> bool) -> arr -> bool
val is_zero : arr -> bool
val is_positive : arr -> bool
val is_negative : arr -> bool
val is_nonpositive : arr -> bool
val is_nonnegative : arr -> bool
val is_normal : arr -> bool
val not_nan : arr -> bool
val not_inf : arr -> bool
val equal : arr -> arr -> bool
val not_equal : arr -> arr -> bool
val greater : arr -> arr -> bool
val less : arr -> arr -> bool
val greater_equal : arr -> arr -> bool
val less_equal : arr -> arr -> bool
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val equal_scalar : arr -> elt -> bool
val not_equal_scalar : arr -> elt -> bool
val less_scalar : arr -> elt -> bool
val greater_scalar : arr -> elt -> bool
val less_equal_scalar : arr -> elt -> bool
val greater_equal_scalar : arr -> elt -> bool
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val diag : ?k:int -> arr -> arr
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
Create N-dimensional array
val linspace : elt -> elt -> int -> arr

linspace 0. 9. 10 ...

val logspace : ?base:float -> elt -> elt -> int -> arr

logspace 0. 9. 10 ...

val unit_basis : int -> int -> arr

unit_basis n i returns a unit basis vector of length n with the ith element set to 1.

Obtain basic properties
val num_dims : arr -> int
val nth_dim : arr -> int -> int
val nnz : arr -> int
val density : arr -> float
val size_in_bytes : arr -> int
val same_shape : arr -> arr -> bool
val same_data : arr -> arr -> bool
val ind : arr -> int -> int array
val i1d : arr -> int array -> int
Manipulate an N-dimensional array
val get_index : arr -> int array array -> elt array
val set_index : arr -> int array array -> elt array -> unit
val get_fancy : Owl_types.index list -> arr -> arr
val set_fancy : Owl_types.index list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val sub_ndarray : int array -> arr -> arr array
val slice_left : arr -> int array -> arr
val fill : arr -> elt -> unit
val resize : ?head:bool -> arr -> int array -> arr
val flip : ?axis:int -> arr -> arr
val rotate : arr -> int -> arr
val swap : int -> int -> arr -> arr
val concat_vertical : arr -> arr -> arr
val concat_horizontal : arr -> arr -> arr
val concat_vh : arr array array -> arr
val split_vh : (int * int) array array -> arr -> arr array array
val dropout : ?rate:float -> arr -> arr
val top : arr -> int -> int array array
val bottom : arr -> int -> int array array
val sort : arr -> arr
val sort1 : ?axis:int -> arr -> arr
val argsort : arr -> (int64, Stdlib.Bigarray.int64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
val mmap : Unix.file_descr -> ?pos:int64 -> bool -> int array -> arr
Iterate array elements
val iter2i : (int -> elt -> elt -> unit) -> arr -> arr -> unit
val iter2 : (elt -> elt -> unit) -> arr -> arr -> unit
val map2i : (int -> elt -> elt -> elt) -> arr -> arr -> arr
val map2 : (elt -> elt -> elt) -> arr -> arr -> arr
val iteri_nd : (int array -> elt -> unit) -> arr -> unit
val mapi_nd : (int array -> elt -> elt) -> arr -> arr
val foldi_nd : ?axis:int -> (int array -> elt -> elt -> elt) -> elt -> arr -> arr
val scani_nd : ?axis:int -> (int array -> elt -> elt -> elt) -> arr -> arr
val filteri_nd : (int array -> elt -> bool) -> arr -> int array array
val iter2i_nd : (int array -> elt -> elt -> unit) -> arr -> arr -> unit
val map2i_nd : (int array -> elt -> elt -> elt) -> arr -> arr -> arr
val iteri_slice : ?axis:int -> (int -> arr -> unit) -> arr -> unit
val iter_slice : ?axis:int -> (arr -> unit) -> arr -> unit
val mapi_slice : ?axis:int -> (int -> arr -> 'c) -> arr -> 'c array
val map_slice : ?axis:int -> (arr -> 'c) -> arr -> 'c array
val filteri_slice : ?axis:int -> (int -> arr -> bool) -> arr -> arr array
val filter_slice : ?axis:int -> (arr -> bool) -> arr -> arr array
val foldi_slice : ?axis:int -> (int -> 'c -> arr -> 'c) -> 'c -> arr -> 'c
val fold_slice : ?axis:int -> ('c -> arr -> 'c) -> 'c -> arr -> 'c
Examine array elements or compare two arrays
Input/Output functions
val to_array : arr -> elt array
val save : out:string -> arr -> unit
val load : string -> arr
val save_npy : out:string -> arr -> unit
val load_npy : string -> arr
Unary mathematical operations
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod' : arr -> elt
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean' : arr -> elt
val median' : arr -> elt
val median : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var' : arr -> elt
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std' : arr -> elt
val sem : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sem' : arr -> elt
val minmax : ?axis:int -> ?keep_dims:bool -> arr -> arr * arr
val minmax' : arr -> elt * elt
val min_i : arr -> elt * int array
val max_i : arr -> elt * int array
val minmax_i : arr -> (elt * int array) * (elt * int array)
val abs2 : arr -> arr
val conj : arr -> arr
val reci : arr -> arr
val reci_tol : ?tol:elt -> arr -> arr
val cbrt : arr -> arr
val exp2 : arr -> arr
val exp10 : arr -> arr
val expm1 : arr -> arr
val log1p : arr -> arr
val trunc : arr -> arr
val fix : arr -> arr
val modf : arr -> arr * arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm_sqr : ?axis:int -> ?keep_dims:bool -> arr -> arr
val vecnorm : ?axis:int -> ?p:float -> ?keep_dims:bool -> arr -> arr
val vecnorm' : ?p:float -> arr -> elt
val cumsum : ?axis:int -> arr -> arr
val cumprod : ?axis:int -> arr -> arr
val cummin : ?axis:int -> arr -> arr
val cummax : ?axis:int -> arr -> arr
val diff : ?axis:int -> ?n:int -> arr -> arr
val lgamma : arr -> arr
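A short, hedged example of the reduction and statistics functions above (random data, arbitrary shape):

  (* per-column means and cumulative sums, plus scalar mean and std *)
  let () =
    let open Owl_algodiff_primal_ops.S in
    let x = gaussian ~mu:2. ~sigma:0.5 [|100; 3|] in
    let col_mean = mean ~axis:0 x in
    let running = cumsum ~axis:0 x in
    Printf.printf "overall mean = %g  std = %g\n" (mean' x) (std' x);
    ignore (col_mean, running)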
Binary mathematical operations
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val ssqr' : arr -> elt -> elt
val ssqr_diff' : arr -> arr -> elt
Tensor Calculus
val contract1 : (int * int) array -> arr -> arr
val contract2 : (int * int) array -> arr -> arr -> arr
Experimental functions
val slide : ?axis:int -> ?ofs:int -> ?step:int -> window:int -> arr -> arr
Functions of in-place modification
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:float -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val sort_ : arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_fancy_ : out:arr -> Owl_types.index list -> arr -> unit
val set_fancy_ : out:arr -> Owl_types.index list -> arr -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
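The in-place variants above write their result into ~out (or, when ~out is omitted, typically update their first array argument), which avoids fresh allocations in tight loops. A hedged sketch:

  (* out <- a + b, then out <- sqrt out, all without allocating new arrays *)
  let () =
    let open Owl_algodiff_primal_ops.S in
    let a = uniform [|2; 2|] and b = uniform [|2; 2|] in
    let out = zeros [|2; 2|] in
    add_ ~out a b;
    sqrt_ out;
    print out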

Matrix functions

val col : arr -> int -> arr
val cols : arr -> int array -> arr
val to_arrays : arr -> elt array array
val draw_rows : ?replacement:bool -> arr -> int -> arr * int array
val draw_cols : ?replacement:bool -> arr -> int -> arr * int array
val draw_rows2 : ?replacement:bool -> arr -> arr -> int -> arr * arr * int array
val draw_cols2 : ?replacement:bool -> arr -> arr -> int -> arr * arr * int array
include Owl_dense_ndarray_intf.Real with type elt := elt and type arr := arr
include Owl_base_dense_ndarray_intf.Real with type elt := elt with type arr := arr
val sum_slices : ?axis:int -> arr -> arr
val signum : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val atan2 : arr -> arr -> arr
val approx_equal : ?eps:float -> arr -> arr -> bool
val approx_equal_scalar : ?eps:float -> arr -> float -> bool
val approx_elt_equal : ?eps:float -> arr -> arr -> arr
val approx_elt_equal_scalar : ?eps:float -> arr -> float -> arr
val dot : arr -> arr -> arr
val trace : arr -> elt
Helper functions
val float_to_elt : float -> elt
val elt_to_float : elt -> float
Real operations
val i0 : arr -> arr
val i0e : arr -> arr
val i1 : arr -> arr
val i1e : arr -> arr
val iv : v:arr -> arr -> arr
val scalar_iv : v:elt -> arr -> arr
val iv_scalar : v:arr -> elt -> arr
val j0 : arr -> arr
val j1 : arr -> arr
val jv : v:arr -> arr -> arr
val scalar_jv : v:elt -> arr -> arr
val jv_scalar : v:arr -> elt -> arr
val erf : arr -> arr
val erfc : arr -> arr
val logistic : arr -> arr
val elu : ?alpha:elt -> arr -> arr
val leaky_relu : ?alpha:elt -> arr -> arr
val softplus : arr -> arr
val softsign : arr -> arr
val softmax : ?axis:int -> arr -> arr
val sigmoid : arr -> arr
val log_sum_exp' : arr -> float
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val fmod_scalar : arr -> elt -> arr
val scalar_fmod : elt -> arr -> arr
val cross_entropy' : arr -> arr -> float
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
val poisson : mu:elt -> int array -> arr
val poisson_ : mu:elt -> out:arr -> unit
include Owl_dense_ndarray_intf.NN with type arr := arr
include Owl_base_dense_ndarray_intf.NN with type arr := arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val max_pool2d_argmax : ?padding:Owl_types.padding -> arr -> int array -> int array -> arr * (int64, Stdlib.Bigarray.int64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
val conv1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
include module type of struct include Owl_dense_ndarray.Operator end
include sig ... end
val (+$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (-$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (*$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (/$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($+) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($-) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($*) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($/) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val (=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (!=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (<>) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (>) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (<) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (>=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (<=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
include sig ... end
val (=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (!=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (<>$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (<$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (>$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (<=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (>=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (!=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (<>.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (<.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (>.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (<=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (>=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (=~) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (=~$) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (=~.) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val (=~.$) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (%) : (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t
val (%$) : (float, 'a) Owl_dense_ndarray_generic.t -> float -> (float, 'a) Owl_dense_ndarray_generic.t
val (**) : (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t
val ($**) : float -> (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t
val (**$) : (float, 'a) Owl_dense_ndarray_generic.t -> float -> (float, 'a) Owl_dense_ndarray_generic.t
val (+=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (-=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (*=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (/=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (+$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (-$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (*$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (/$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (.!{;..}) : ('a, 'b) Owl_dense_ndarray_generic.t -> Owl_types.index array -> ('a, 'b) Owl_dense_ndarray_generic.t
val (.!{;..}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> Owl_types.index array -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (.${}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list -> ('a, 'b) Owl_dense_ndarray_generic.t
val (.${;..}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list array -> ('a, 'b) Owl_dense_ndarray_generic.t
val (.${}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (.${;..}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list array -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
include sig ... end
val (.%{}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int -> 'a
val (.%{;..}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int array -> 'a
val (.%{}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int -> 'a -> unit
val (.%{;..}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int array -> 'a -> unit
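A hedged sketch of the indexing operators above: .%{ } reads or writes a single element, while .${ } takes int-list slice specifications (this assumes the usual OCaml desugaring of user-defined index operators):

  (* element access/update and a slice of the first row *)
  let () =
    let open Owl_algodiff_primal_ops.S in
    let x = sequential [|3; 4|] in
    x.%{1; 2} <- 99.;
    Printf.printf "element (1,2) = %g\n" x.%{1; 2};
    print (x.${[0]; []})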
val mpow : Owl_linalg_s.mat -> float -> Owl_linalg_s.mat
module Scalar = Owl_maths
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl/Owl_cblas/.dummy b/owl/Owl_cblas/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_cblas_basic/.dummy b/owl/Owl_cblas_basic/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_cblas_generated/.dummy b/owl/Owl_cblas_generated/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_cluster/.dummy b/owl/Owl_cluster/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_core_types/.dummy b/owl/Owl_core_types/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_dataset/.dummy b/owl/Owl_dataset/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_dense/.dummy b/owl/Owl_dense/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_dense_matrix/.dummy b/owl/Owl_dense_matrix/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_dense_matrix/C/index.html b/owl/Owl_dense_matrix/C/index.html deleted file mode 100644 index 921b5ca75..000000000 --- a/owl/Owl_dense_matrix/C/index.html +++ /dev/null @@ -1,215 +0,0 @@ - -C (owl.Owl_dense_matrix.C)

Module Owl_dense_matrix.C

include module type of struct include Owl_dense_matrix_c end
type elt = Stdlib.Complex.t
type mat = (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt) Owl_dense_matrix_generic.t
type cast_mat = (float, Stdlib.Bigarray.float32_elt) Owl_dense_matrix_generic.t
include Owl_dense_matrix_intf.Common with type elt := elt and type mat := mat
Create dense matrices
val empty : int -> int -> mat
val create : int -> int -> elt -> mat
val init : int -> int -> (int -> elt) -> mat
val init_2d : int -> int -> (int -> int -> elt) -> mat
val zeros : int -> int -> mat
val ones : int -> int -> mat
val eye : int -> mat
val sequential : ?a:elt -> ?step:elt -> int -> int -> mat
val uniform : ?a:elt -> ?b:elt -> int -> int -> mat
val gaussian : ?mu:elt -> ?sigma:elt -> int -> int -> mat
val bernoulli : ?p:float -> int -> int -> mat
val unit_basis : int -> int -> mat
val diagm : ?k:int -> mat -> mat
val triu : ?k:int -> mat -> mat
val tril : ?k:int -> mat -> mat
val symmetric : ?upper:bool -> mat -> mat
val bidiagonal : ?upper:bool -> mat -> mat -> mat
val toeplitz : ?c:mat -> mat -> mat
val hankel : ?r:mat -> mat -> mat
val hadamard : int -> mat
val magic : int -> mat
Dense row vectors and meshgrids
val vector : int -> mat
val vector_zeros : int -> mat
val vector_ones : int -> mat
val vector_uniform : int -> mat
val linspace : elt -> elt -> int -> mat
val logspace : ?base:float -> elt -> elt -> int -> mat
val meshgrid : elt -> elt -> elt -> elt -> int -> int -> mat * mat
val meshup : mat -> mat -> mat * mat
Obtain the basic properties of a matrix
val shape : mat -> int * int
val row_num : mat -> int
val col_num : mat -> int
val numel : mat -> int
val nnz : mat -> int
val density : mat -> float
val size_in_bytes : mat -> int
val same_shape : mat -> mat -> bool
val same_data : mat -> mat -> bool
Manipulate a matrix
val get : mat -> int -> int -> elt
val set : mat -> int -> int -> elt -> unit
val get_index : mat -> int array array -> elt array
val set_index : mat -> int array array -> elt array -> unit
val get_fancy : Owl_types.index list -> mat -> mat
val set_fancy : Owl_types.index list -> mat -> mat -> unit
val get_slice : int list list -> mat -> mat
val set_slice : int list list -> mat -> mat -> unit
val row : mat -> int -> mat
val col : mat -> int -> mat
val rows : mat -> int array -> mat
val cols : mat -> int array -> mat
val resize : ?head:bool -> mat -> int array -> mat
val reshape : mat -> int array -> mat
val flatten : mat -> mat
val reverse : mat -> mat
val flip : ?axis:int -> mat -> mat
val rotate : mat -> int -> mat
val reset : mat -> unit
val fill : mat -> elt -> unit
val copy : mat -> mat
val copy_row_to : mat -> mat -> int -> unit
val copy_col_to : mat -> mat -> int -> unit
val concat_vertical : mat -> mat -> mat
val concat_horizontal : mat -> mat -> mat
val concat_vh : mat array array -> mat
val concatenate : ?axis:int -> mat array -> mat
val split : ?axis:int -> int array -> mat -> mat array
val split_vh : (int * int) array array -> mat -> mat array array
val transpose : mat -> mat
val ctranspose : mat -> mat
val diag : ?k:int -> mat -> mat
val swap_rows : mat -> int -> int -> unit
val swap_cols : mat -> int -> int -> unit
val tile : mat -> int array -> mat
val repeat : mat -> int array -> mat
val pad : ?v:elt -> int list list -> mat -> mat
val dropout : ?rate:float -> mat -> mat
val top : mat -> int -> int array array
val bottom : mat -> int -> int array array
val sort : mat -> mat
val argsort : mat -> (int64, Stdlib.Bigarray.int64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
Iterate elements, columns, and rows.
val iteri : (int -> elt -> unit) -> mat -> unit
val iter : (elt -> unit) -> mat -> unit
val mapi : (int -> elt -> elt) -> mat -> mat
val map : (elt -> elt) -> mat -> mat
val foldi : ?axis:int -> (int -> elt -> elt -> elt) -> elt -> mat -> mat
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> mat -> mat
val scani : ?axis:int -> (int -> elt -> elt -> elt) -> mat -> mat
val scan : ?axis:int -> (elt -> elt -> elt) -> mat -> mat
val filteri : (int -> elt -> bool) -> mat -> int array
val filter : (elt -> bool) -> mat -> int array
val iteri_2d : (int -> int -> elt -> unit) -> mat -> unit
val mapi_2d : (int -> int -> elt -> elt) -> mat -> mat
val foldi_2d : ?axis:int -> (int -> int -> elt -> elt -> elt) -> elt -> mat -> mat
val scani_2d : ?axis:int -> (int -> int -> elt -> elt -> elt) -> mat -> mat
val filteri_2d : (int -> int -> elt -> bool) -> mat -> (int * int) array
val iter2i_2d : (int -> int -> elt -> elt -> unit) -> mat -> mat -> unit
val map2i_2d : (int -> int -> elt -> elt -> elt) -> mat -> mat -> mat
val iter2i : (int -> elt -> elt -> unit) -> mat -> mat -> unit
val iter2 : (elt -> elt -> unit) -> mat -> mat -> unit
val map2i : (int -> elt -> elt -> elt) -> mat -> mat -> mat
val map2 : (elt -> elt -> elt) -> mat -> mat -> mat
val iteri_rows : (int -> mat -> unit) -> mat -> unit
val iter_rows : (mat -> unit) -> mat -> unit
val iter2i_rows : (int -> mat -> mat -> unit) -> mat -> mat -> unit
val iter2_rows : (mat -> mat -> unit) -> mat -> mat -> unit
val iteri_cols : (int -> mat -> unit) -> mat -> unit
val iter_cols : (mat -> unit) -> mat -> unit
val filteri_rows : (int -> mat -> bool) -> mat -> int array
val filter_rows : (mat -> bool) -> mat -> int array
val filteri_cols : (int -> mat -> bool) -> mat -> int array
val filter_cols : (mat -> bool) -> mat -> int array
val fold_rows : ('a -> mat -> 'a) -> 'a -> mat -> 'a
val fold_cols : ('a -> mat -> 'a) -> 'a -> mat -> 'a
val mapi_rows : (int -> mat -> 'a) -> mat -> 'a array
val map_rows : (mat -> 'a) -> mat -> 'a array
val mapi_cols : (int -> mat -> 'a) -> mat -> 'a array
val map_cols : (mat -> 'a) -> mat -> 'a array
val mapi_by_row : int -> (int -> mat -> mat) -> mat -> mat
val map_by_row : int -> (mat -> mat) -> mat -> mat
val mapi_by_col : int -> (int -> mat -> mat) -> mat -> mat
val map_by_col : int -> (mat -> mat) -> mat -> mat
val mapi_at_row : (int -> elt -> elt) -> mat -> int -> mat
val map_at_row : (elt -> elt) -> mat -> int -> mat
val mapi_at_col : (int -> elt -> elt) -> mat -> int -> mat
val map_at_col : (elt -> elt) -> mat -> int -> mat
Examine elements and compare two matrices
val exists : (elt -> bool) -> mat -> bool
val not_exists : (elt -> bool) -> mat -> bool
val for_all : (elt -> bool) -> mat -> bool
val is_zero : mat -> bool
val is_positive : mat -> bool
val is_negative : mat -> bool
val is_nonpositive : mat -> bool
val is_nonnegative : mat -> bool
val is_normal : mat -> bool
val not_nan : mat -> bool
val not_inf : mat -> bool
val equal : mat -> mat -> bool
val not_equal : mat -> mat -> bool
val greater : mat -> mat -> bool
val less : mat -> mat -> bool
val greater_equal : mat -> mat -> bool
val less_equal : mat -> mat -> bool
val elt_equal : mat -> mat -> mat
val elt_not_equal : mat -> mat -> mat
val elt_less : mat -> mat -> mat
val elt_greater : mat -> mat -> mat
val elt_less_equal : mat -> mat -> mat
val elt_greater_equal : mat -> mat -> mat
val equal_scalar : mat -> elt -> bool
val not_equal_scalar : mat -> elt -> bool
val less_scalar : mat -> elt -> bool
val greater_scalar : mat -> elt -> bool
val less_equal_scalar : mat -> elt -> bool
val greater_equal_scalar : mat -> elt -> bool
val elt_equal_scalar : mat -> elt -> mat
val elt_not_equal_scalar : mat -> elt -> mat
val elt_less_scalar : mat -> elt -> mat
val elt_greater_scalar : mat -> elt -> mat
val elt_less_equal_scalar : mat -> elt -> mat
val elt_greater_equal_scalar : mat -> elt -> mat
val approx_equal : ?eps:float -> mat -> mat -> bool
val approx_equal_scalar : ?eps:float -> mat -> elt -> bool
val approx_elt_equal : ?eps:float -> mat -> mat -> mat
val approx_elt_equal_scalar : ?eps:float -> mat -> elt -> mat
Randomisation functions
val draw_rows : ?replacement:bool -> mat -> int -> mat * int array
val draw_cols : ?replacement:bool -> mat -> int -> mat * int array
val draw_rows2 : ?replacement:bool -> mat -> mat -> int -> mat * mat * int array
val draw_cols2 : ?replacement:bool -> mat -> mat -> int -> mat * mat * int array
val shuffle_rows : mat -> mat
val shuffle_cols : mat -> mat
val shuffle : mat -> mat
Input/Output and helper functions
val to_array : mat -> elt array
val of_array : elt array -> int -> int -> mat
val to_arrays : mat -> elt array array
val of_arrays : elt array array -> mat
val to_rows : mat -> mat array
val of_rows : mat array -> mat
val to_cols : mat -> mat array
val of_cols : mat array -> mat
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> mat -> unit
val save : out:string -> mat -> unit
val load : string -> mat
val save_txt : ?sep:string -> ?append:bool -> out:string -> mat -> unit
val load_txt : ?sep:string -> string -> mat
val save_npy : out:string -> mat -> unit
val load_npy : string -> mat
Unary mathematical operations
val min : ?axis:int -> ?keep_dims:bool -> mat -> mat
val min' : mat -> elt
val max : ?axis:int -> ?keep_dims:bool -> mat -> mat
val max' : mat -> elt
val minmax : ?axis:int -> ?keep_dims:bool -> mat -> mat * mat
val minmax' : mat -> elt * elt
val min_i : mat -> elt * int array
val max_i : mat -> elt * int array
val minmax_i : mat -> (elt * int array) * (elt * int array)
val trace : mat -> elt
val sum : ?axis:int -> ?keep_dims:bool -> mat -> mat
val sum' : mat -> elt
val prod : ?axis:int -> ?keep_dims:bool -> mat -> mat
val prod' : mat -> elt
val mean : ?axis:int -> ?keep_dims:bool -> mat -> mat
val mean' : mat -> elt
val var' : mat -> elt
val std' : mat -> elt
val sem : ?axis:int -> ?keep_dims:bool -> mat -> mat
val sem' : mat -> elt
val sum_rows : ?keep_dims:bool -> mat -> mat
val sum_cols : ?keep_dims:bool -> mat -> mat
val mean_rows : ?keep_dims:bool -> mat -> mat
val mean_cols : ?keep_dims:bool -> mat -> mat
val abs : mat -> mat
val abs2 : mat -> mat
val conj : mat -> mat
val neg : mat -> mat
val reci : mat -> mat
val reci_tol : ?tol:elt -> mat -> mat
val sqr : mat -> mat
val sqrt : mat -> mat
val cbrt : mat -> mat
val exp : mat -> mat
val exp2 : mat -> mat
val exp10 : mat -> mat
val expm1 : mat -> mat
val log : mat -> mat
val log10 : mat -> mat
val log2 : mat -> mat
val log1p : mat -> mat
val sin : mat -> mat
val cos : mat -> mat
val tan : mat -> mat
val asin : mat -> mat
val acos : mat -> mat
val atan : mat -> mat
val sinh : mat -> mat
val cosh : mat -> mat
val tanh : mat -> mat
val asinh : mat -> mat
val acosh : mat -> mat
val atanh : mat -> mat
val floor : mat -> mat
val ceil : mat -> mat
val round : mat -> mat
val trunc : mat -> mat
val fix : mat -> mat
val modf : mat -> mat * mat
val l1norm : ?axis:int -> ?keep_dims:bool -> mat -> mat
val l1norm' : mat -> elt
val l2norm : ?axis:int -> ?keep_dims:bool -> mat -> mat
val l2norm' : mat -> elt
val l2norm_sqr : ?axis:int -> ?keep_dims:bool -> mat -> mat
val l2norm_sqr' : mat -> elt
val vecnorm : ?axis:int -> ?p:float -> ?keep_dims:bool -> mat -> mat
val vecnorm' : ?p:float -> mat -> elt
val cumsum : ?axis:int -> mat -> mat
val cumprod : ?axis:int -> mat -> mat
val cummin : ?axis:int -> mat -> mat
val cummax : ?axis:int -> mat -> mat
val diff : ?axis:int -> ?n:int -> mat -> mat
val var : ?axis:int -> ?keep_dims:bool -> mat -> mat
val std : ?axis:int -> ?keep_dims:bool -> mat -> mat
val mat2gray : ?amin:elt -> ?amax:elt -> mat -> mat
val lgamma : mat -> mat
val dawsn : mat -> mat
Binary mathematical operations
val add : mat -> mat -> mat
val sub : mat -> mat -> mat
val mul : mat -> mat -> mat
val div : mat -> mat -> mat
val add_scalar : mat -> elt -> mat
val sub_scalar : mat -> elt -> mat
val mul_scalar : mat -> elt -> mat
val div_scalar : mat -> elt -> mat
val scalar_add : elt -> mat -> mat
val scalar_sub : elt -> mat -> mat
val scalar_mul : elt -> mat -> mat
val scalar_div : elt -> mat -> mat
val dot : mat -> mat -> mat
val add_diag : mat -> elt -> mat
val pow : mat -> mat -> mat
val scalar_pow : elt -> mat -> mat
val pow_scalar : mat -> elt -> mat
val min2 : mat -> mat -> mat
val max2 : mat -> mat -> mat
val ssqr' : mat -> elt -> elt
val ssqr_diff' : mat -> mat -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> mat -> mat
val cov : ?b:mat -> a:mat -> mat
val kron : mat -> mat -> mat
val fma : mat -> mat -> mat -> mat
Functions of in-place modification
val create_ : out:mat -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:mat -> unit
val bernoulli_ : ?p:float -> out:mat -> unit
val zeros_ : out:mat -> unit
val ones_ : out:mat -> unit
val sort_ : mat -> unit
val one_hot_ : out:mat -> int -> mat -> unit
val copy_ : out:mat -> mat -> unit
val reshape_ : out:mat -> mat -> unit
val transpose_ : out:mat -> ?axis:int array -> mat -> unit
val sum_ : out:mat -> axis:int -> mat -> unit
val min_ : out:mat -> axis:int -> mat -> unit
val max_ : out:mat -> axis:int -> mat -> unit
val add_ : ?out:mat -> mat -> mat -> unit
val sub_ : ?out:mat -> mat -> mat -> unit
val mul_ : ?out:mat -> mat -> mat -> unit
val div_ : ?out:mat -> mat -> mat -> unit
val pow_ : ?out:mat -> mat -> mat -> unit
val atan2_ : ?out:mat -> mat -> mat -> unit
val hypot_ : ?out:mat -> mat -> mat -> unit
val fmod_ : ?out:mat -> mat -> mat -> unit
val min2_ : ?out:mat -> mat -> mat -> unit
val max2_ : ?out:mat -> mat -> mat -> unit
val add_scalar_ : ?out:mat -> mat -> elt -> unit
val sub_scalar_ : ?out:mat -> mat -> elt -> unit
val mul_scalar_ : ?out:mat -> mat -> elt -> unit
val div_scalar_ : ?out:mat -> mat -> elt -> unit
val pow_scalar_ : ?out:mat -> mat -> elt -> unit
val atan2_scalar_ : ?out:mat -> mat -> elt -> unit
val fmod_scalar_ : ?out:mat -> mat -> elt -> unit
val scalar_add_ : ?out:mat -> elt -> mat -> unit
val scalar_sub_ : ?out:mat -> elt -> mat -> unit
val scalar_mul_ : ?out:mat -> elt -> mat -> unit
val scalar_div_ : ?out:mat -> elt -> mat -> unit
val scalar_pow_ : ?out:mat -> elt -> mat -> unit
val scalar_atan2_ : ?out:mat -> elt -> mat -> unit
val scalar_fmod_ : ?out:mat -> elt -> mat -> unit
val fma_ : ?out:mat -> mat -> mat -> mat -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:mat -> mat -> mat -> unit
val conj_ : ?out:mat -> mat -> unit
val abs_ : ?out:mat -> mat -> unit
val neg_ : ?out:mat -> mat -> unit
val reci_ : ?out:mat -> mat -> unit
val signum_ : ?out:mat -> mat -> unit
val sqr_ : ?out:mat -> mat -> unit
val sqrt_ : ?out:mat -> mat -> unit
val cbrt_ : ?out:mat -> mat -> unit
val exp_ : ?out:mat -> mat -> unit
val exp2_ : ?out:mat -> mat -> unit
val exp10_ : ?out:mat -> mat -> unit
val expm1_ : ?out:mat -> mat -> unit
val log_ : ?out:mat -> mat -> unit
val log2_ : ?out:mat -> mat -> unit
val log10_ : ?out:mat -> mat -> unit
val log1p_ : ?out:mat -> mat -> unit
val sin_ : ?out:mat -> mat -> unit
val cos_ : ?out:mat -> mat -> unit
val tan_ : ?out:mat -> mat -> unit
val asin_ : ?out:mat -> mat -> unit
val acos_ : ?out:mat -> mat -> unit
val atan_ : ?out:mat -> mat -> unit
val sinh_ : ?out:mat -> mat -> unit
val cosh_ : ?out:mat -> mat -> unit
val tanh_ : ?out:mat -> mat -> unit
val asinh_ : ?out:mat -> mat -> unit
val acosh_ : ?out:mat -> mat -> unit
val atanh_ : ?out:mat -> mat -> unit
val floor_ : ?out:mat -> mat -> unit
val ceil_ : ?out:mat -> mat -> unit
val round_ : ?out:mat -> mat -> unit
val trunc_ : ?out:mat -> mat -> unit
val fix_ : ?out:mat -> mat -> unit
val erf_ : ?out:mat -> mat -> unit
val erfc_ : ?out:mat -> mat -> unit
val relu_ : ?out:mat -> mat -> unit
val softplus_ : ?out:mat -> mat -> unit
val softsign_ : ?out:mat -> mat -> unit
val sigmoid_ : ?out:mat -> mat -> unit
val softmax_ : ?out:mat -> ?axis:int -> mat -> unit
val cumsum_ : ?out:mat -> ?axis:int -> mat -> unit
val cumprod_ : ?out:mat -> ?axis:int -> mat -> unit
val cummin_ : ?out:mat -> ?axis:int -> mat -> unit
val cummax_ : ?out:mat -> ?axis:int -> mat -> unit
val dropout_ : ?out:mat -> ?rate:float -> mat -> unit
val elt_equal_ : ?out:mat -> mat -> mat -> unit
val elt_not_equal_ : ?out:mat -> mat -> mat -> unit
val elt_less_ : ?out:mat -> mat -> mat -> unit
val elt_greater_ : ?out:mat -> mat -> mat -> unit
val elt_less_equal_ : ?out:mat -> mat -> mat -> unit
val elt_greater_equal_ : ?out:mat -> mat -> mat -> unit
val elt_equal_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_not_equal_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_less_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_greater_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_less_equal_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_greater_equal_scalar_ : ?out:mat -> mat -> elt -> unit
include Owl_dense_matrix_intf.Complex with type mat := mat and type cast_mat := cast_mat
Specific complex functions
val complex : cast_mat -> cast_mat -> mat
val polar : cast_mat -> cast_mat -> mat
val re : mat -> cast_mat
val im : mat -> cast_mat
include module type of struct include Operator end
include sig ... end
val (+$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (-$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (*$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (/$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val ($+) : 'a -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val ($-) : 'a -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val ($*) : 'a -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val ($/) : 'a -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (!=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (<>) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (>) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (<) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (>=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (<=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
include sig ... end
val (=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (!=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (<>$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (<$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (>$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (<=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (>=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (=.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (!=.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (<>.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (<.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (>.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (<=.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (>=.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (=.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (!=.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (<>.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (<.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (>.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (<=.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (>=.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (=~) : ?eps:float -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (=~$) : ?eps:float -> ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (=~.) : ?eps:float -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (=~.$) : ?eps:float -> ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (%) : (float, 'a) Owl_dense_matrix_generic.t -> (float, 'a) Owl_dense_matrix_generic.t -> (float, 'a) Owl_dense_matrix_generic.t
val (%$) : (float, 'a) Owl_dense_matrix_generic.t -> float -> (float, 'a) Owl_dense_matrix_generic.t
val (**) : (float, 'a) Owl_dense_matrix_generic.t -> (float, 'a) Owl_dense_matrix_generic.t -> (float, 'a) Owl_dense_matrix_generic.t
val ($**) : float -> (float, 'a) Owl_dense_matrix_generic.t -> (float, 'a) Owl_dense_matrix_generic.t
val (**$) : (float, 'a) Owl_dense_matrix_generic.t -> float -> (float, 'a) Owl_dense_matrix_generic.t
val (+=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
val (-=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
val (*=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
val (/=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
val (+$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (-$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (*$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (/$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (@=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (@||) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (.!{;..}) : ('a, 'b) Owl_dense_matrix_generic.t -> Owl_types.index array -> ('a, 'b) Owl_dense_matrix_generic.t
val (.!{;..}<-) : ('a, 'b) Owl_dense_matrix_generic.t -> Owl_types.index array -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
val (.${}) : ('a, 'b) Owl_dense_matrix_generic.t -> int list -> ('a, 'b) Owl_dense_matrix_generic.t
val (.${;..}) : ('a, 'b) Owl_dense_matrix_generic.t -> int list array -> ('a, 'b) Owl_dense_matrix_generic.t
val (.${}<-) : ('a, 'b) Owl_dense_matrix_generic.t -> int list -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
val (.${;..}<-) : ('a, 'b) Owl_dense_matrix_generic.t -> int list array -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
include sig ... end
val (*@) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (.%{}) : ('a, 'b) Owl_dense_matrix_generic.t -> (int * int) -> 'a
val (.%{;..}) : ('a, 'b) Owl_dense_matrix_generic.t -> int array -> 'a
val (.%{}<-) : ('a, 'b) Owl_dense_matrix_generic.t -> (int * int) -> 'a -> unit
val (.%{;..}<-) : ('a, 'b) Owl_dense_matrix_generic.t -> int array -> 'a -> unit
include sig ... end
val (**@) : ('a, 'b) Owl_linalg_generic.t -> float -> ('a, 'b) Owl_linalg_generic.t
val (/@) : ('a, 'b) Owl_linalg_generic.t -> ('a, 'b) Owl_linalg_generic.t -> ('a, 'b) Owl_linalg_generic.t
val mpow : Owl_linalg_c.mat -> float -> Owl_linalg_c.mat
\ No newline at end of file diff --git a/owl/Owl_dense_matrix/D/index.html b/owl/Owl_dense_matrix/D/index.html deleted file mode 100644 index 0dfe0c3b3..000000000 --- a/owl/Owl_dense_matrix/D/index.html +++ /dev/null @@ -1,225 +0,0 @@ - -D (owl.Owl_dense_matrix.D)

Module Owl_dense_matrix.D

include module type of struct include Owl_dense_matrix_d end
type elt = float
type mat = (float, Stdlib.Bigarray.float64_elt) Owl_dense_matrix_generic.t
include Owl_dense_matrix_intf.Common with type elt := elt and type mat := mat
Create dense matrices
val empty : int -> int -> mat
val create : int -> int -> elt -> mat
val init : int -> int -> (int -> elt) -> mat
val init_2d : int -> int -> (int -> int -> elt) -> mat
val zeros : int -> int -> mat
val ones : int -> int -> mat
val eye : int -> mat
val sequential : ?a:elt -> ?step:elt -> int -> int -> mat
val uniform : ?a:elt -> ?b:elt -> int -> int -> mat
val gaussian : ?mu:elt -> ?sigma:elt -> int -> int -> mat
val bernoulli : ?p:float -> int -> int -> mat
val unit_basis : int -> int -> mat
val diagm : ?k:int -> mat -> mat
val triu : ?k:int -> mat -> mat
val tril : ?k:int -> mat -> mat
val symmetric : ?upper:bool -> mat -> mat
val bidiagonal : ?upper:bool -> mat -> mat -> mat
val toeplitz : ?c:mat -> mat -> mat
val hankel : ?r:mat -> mat -> mat
val hadamard : int -> mat
val magic : int -> mat
Dense row vectors and meshgrids
val vector : int -> mat
val vector_zeros : int -> mat
val vector_ones : int -> mat
val vector_uniform : int -> mat
val linspace : elt -> elt -> int -> mat
val logspace : ?base:float -> elt -> elt -> int -> mat
val meshgrid : elt -> elt -> elt -> elt -> int -> int -> mat * mat
val meshup : mat -> mat -> mat * mat
Obtain the basic properties of a matrix
val shape : mat -> int * int
val row_num : mat -> int
val col_num : mat -> int
val numel : mat -> int
val nnz : mat -> int
val density : mat -> float
val size_in_bytes : mat -> int
val same_shape : mat -> mat -> bool
val same_data : mat -> mat -> bool
Manipulate a matrix
val get : mat -> int -> int -> elt
val set : mat -> int -> int -> elt -> unit
val get_index : mat -> int array array -> elt array
val set_index : mat -> int array array -> elt array -> unit
val get_fancy : Owl_types.index list -> mat -> mat
val set_fancy : Owl_types.index list -> mat -> mat -> unit
val get_slice : int list list -> mat -> mat
val set_slice : int list list -> mat -> mat -> unit
val row : mat -> int -> mat
val col : mat -> int -> mat
val rows : mat -> int array -> mat
val cols : mat -> int array -> mat
val resize : ?head:bool -> mat -> int array -> mat
val reshape : mat -> int array -> mat
val flatten : mat -> mat
val reverse : mat -> mat
val flip : ?axis:int -> mat -> mat
val rotate : mat -> int -> mat
val reset : mat -> unit
val fill : mat -> elt -> unit
val copy : mat -> mat
val copy_row_to : mat -> mat -> int -> unit
val copy_col_to : mat -> mat -> int -> unit
val concat_vertical : mat -> mat -> mat
val concat_horizontal : mat -> mat -> mat
val concat_vh : mat array array -> mat
val concatenate : ?axis:int -> mat array -> mat
val split : ?axis:int -> int array -> mat -> mat array
val split_vh : (int * int) array array -> mat -> mat array array
val transpose : mat -> mat
val ctranspose : mat -> mat
val swap_rows : mat -> int -> int -> unit
val swap_cols : mat -> int -> int -> unit
val tile : mat -> int array -> mat
val repeat : mat -> int array -> mat
val pad : ?v:elt -> int list list -> mat -> mat
val dropout : ?rate:float -> mat -> mat
val top : mat -> int -> int array array
val bottom : mat -> int -> int array array
val sort : mat -> mat
val argsort : mat -> (int64, Stdlib.Bigarray.int64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
Iterate elements, columns, and rows.
val iteri : (int -> elt -> unit) -> mat -> unit
val iter : (elt -> unit) -> mat -> unit
val mapi : (int -> elt -> elt) -> mat -> mat
val map : (elt -> elt) -> mat -> mat
val foldi : ?axis:int -> (int -> elt -> elt -> elt) -> elt -> mat -> mat
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> mat -> mat
val scani : ?axis:int -> (int -> elt -> elt -> elt) -> mat -> mat
val scan : ?axis:int -> (elt -> elt -> elt) -> mat -> mat
val filteri : (int -> elt -> bool) -> mat -> int array
val filter : (elt -> bool) -> mat -> int array
val iteri_2d : (int -> int -> elt -> unit) -> mat -> unit
val mapi_2d : (int -> int -> elt -> elt) -> mat -> mat
val foldi_2d : ?axis:int -> (int -> int -> elt -> elt -> elt) -> elt -> mat -> mat
val scani_2d : ?axis:int -> (int -> int -> elt -> elt -> elt) -> mat -> mat
val filteri_2d : (int -> int -> elt -> bool) -> mat -> (int * int) array
val iter2i_2d : (int -> int -> elt -> elt -> unit) -> mat -> mat -> unit
val map2i_2d : (int -> int -> elt -> elt -> elt) -> mat -> mat -> mat
val iter2i : (int -> elt -> elt -> unit) -> mat -> mat -> unit
val iter2 : (elt -> elt -> unit) -> mat -> mat -> unit
val map2i : (int -> elt -> elt -> elt) -> mat -> mat -> mat
val map2 : (elt -> elt -> elt) -> mat -> mat -> mat
val iteri_rows : (int -> mat -> unit) -> mat -> unit
val iter_rows : (mat -> unit) -> mat -> unit
val iter2i_rows : (int -> mat -> mat -> unit) -> mat -> mat -> unit
val iter2_rows : (mat -> mat -> unit) -> mat -> mat -> unit
val iteri_cols : (int -> mat -> unit) -> mat -> unit
val iter_cols : (mat -> unit) -> mat -> unit
val filteri_rows : (int -> mat -> bool) -> mat -> int array
val filter_rows : (mat -> bool) -> mat -> int array
val filteri_cols : (int -> mat -> bool) -> mat -> int array
val filter_cols : (mat -> bool) -> mat -> int array
val fold_rows : ('a -> mat -> 'a) -> 'a -> mat -> 'a
val fold_cols : ('a -> mat -> 'a) -> 'a -> mat -> 'a
val mapi_rows : (int -> mat -> 'a) -> mat -> 'a array
val map_rows : (mat -> 'a) -> mat -> 'a array
val mapi_cols : (int -> mat -> 'a) -> mat -> 'a array
val map_cols : (mat -> 'a) -> mat -> 'a array
val mapi_by_row : int -> (int -> mat -> mat) -> mat -> mat
val map_by_row : int -> (mat -> mat) -> mat -> mat
val mapi_by_col : int -> (int -> mat -> mat) -> mat -> mat
val map_by_col : int -> (mat -> mat) -> mat -> mat
val mapi_at_row : (int -> elt -> elt) -> mat -> int -> mat
val map_at_row : (elt -> elt) -> mat -> int -> mat
val mapi_at_col : (int -> elt -> elt) -> mat -> int -> mat
val map_at_col : (elt -> elt) -> mat -> int -> mat
Examine elements and compare two matrices
val exists : (elt -> bool) -> mat -> bool
val not_exists : (elt -> bool) -> mat -> bool
val for_all : (elt -> bool) -> mat -> bool
val is_zero : mat -> bool
val is_positive : mat -> bool
val is_negative : mat -> bool
val is_nonpositive : mat -> bool
val is_nonnegative : mat -> bool
val is_normal : mat -> bool
val not_nan : mat -> bool
val not_inf : mat -> bool
val equal : mat -> mat -> bool
val not_equal : mat -> mat -> bool
val greater : mat -> mat -> bool
val less : mat -> mat -> bool
val greater_equal : mat -> mat -> bool
val less_equal : mat -> mat -> bool
val elt_equal : mat -> mat -> mat
val elt_not_equal : mat -> mat -> mat
val elt_less : mat -> mat -> mat
val elt_greater : mat -> mat -> mat
val elt_less_equal : mat -> mat -> mat
val elt_greater_equal : mat -> mat -> mat
val equal_scalar : mat -> elt -> bool
val not_equal_scalar : mat -> elt -> bool
val less_scalar : mat -> elt -> bool
val greater_scalar : mat -> elt -> bool
val less_equal_scalar : mat -> elt -> bool
val greater_equal_scalar : mat -> elt -> bool
val elt_equal_scalar : mat -> elt -> mat
val elt_not_equal_scalar : mat -> elt -> mat
val elt_less_scalar : mat -> elt -> mat
val elt_greater_scalar : mat -> elt -> mat
val elt_less_equal_scalar : mat -> elt -> mat
val elt_greater_equal_scalar : mat -> elt -> mat
val approx_equal : ?eps:float -> mat -> mat -> bool
val approx_equal_scalar : ?eps:float -> mat -> elt -> bool
val approx_elt_equal : ?eps:float -> mat -> mat -> mat
val approx_elt_equal_scalar : ?eps:float -> mat -> elt -> mat
Randomisation functions
val draw_rows : ?replacement:bool -> mat -> int -> mat * int array
val draw_cols : ?replacement:bool -> mat -> int -> mat * int array
val draw_rows2 : ?replacement:bool -> mat -> mat -> int -> mat * mat * int array
val draw_cols2 : ?replacement:bool -> mat -> mat -> int -> mat * mat * int array
val shuffle_rows : mat -> mat
val shuffle_cols : mat -> mat
val shuffle : mat -> mat
Input/Output and helper functions
val to_array : mat -> elt array
val of_array : elt array -> int -> int -> mat
val to_arrays : mat -> elt array array
val of_arrays : elt array array -> mat
val to_rows : mat -> mat array
val of_rows : mat array -> mat
val to_cols : mat -> mat array
val of_cols : mat array -> mat
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> mat -> unit
val save : out:string -> mat -> unit
val load : string -> mat
val save_txt : ?sep:string -> ?append:bool -> out:string -> mat -> unit
val load_txt : ?sep:string -> string -> mat
val save_npy : out:string -> mat -> unit
val load_npy : string -> mat
Unary mathematical operations
val min : ?axis:int -> ?keep_dims:bool -> mat -> mat
val min' : mat -> elt
val max : ?axis:int -> ?keep_dims:bool -> mat -> mat
val max' : mat -> elt
val minmax : ?axis:int -> ?keep_dims:bool -> mat -> mat * mat
val minmax' : mat -> elt * elt
val min_i : mat -> elt * int array
val max_i : mat -> elt * int array
val minmax_i : mat -> (elt * int array) * (elt * int array)
val trace : mat -> elt
val sum : ?axis:int -> ?keep_dims:bool -> mat -> mat
val sum' : mat -> elt
val prod : ?axis:int -> ?keep_dims:bool -> mat -> mat
val prod' : mat -> elt
val mean : ?axis:int -> ?keep_dims:bool -> mat -> mat
val mean' : mat -> elt
val var' : mat -> elt
val std' : mat -> elt
val sem : ?axis:int -> ?keep_dims:bool -> mat -> mat
val sem' : mat -> elt
val sum_rows : ?keep_dims:bool -> mat -> mat
val sum_cols : ?keep_dims:bool -> mat -> mat
val mean_rows : ?keep_dims:bool -> mat -> mat
val mean_cols : ?keep_dims:bool -> mat -> mat
val abs : mat -> mat
val abs2 : mat -> mat
val conj : mat -> mat
val neg : mat -> mat
val reci : mat -> mat
val reci_tol : ?tol:elt -> mat -> mat
val sqr : mat -> mat
val sqrt : mat -> mat
val cbrt : mat -> mat
val exp : mat -> mat
val exp2 : mat -> mat
val exp10 : mat -> mat
val expm1 : mat -> mat
val log : mat -> mat
val log10 : mat -> mat
val log2 : mat -> mat
val log1p : mat -> mat
val sin : mat -> mat
val cos : mat -> mat
val tan : mat -> mat
val asin : mat -> mat
val acos : mat -> mat
val atan : mat -> mat
val sinh : mat -> mat
val cosh : mat -> mat
val tanh : mat -> mat
val asinh : mat -> mat
val acosh : mat -> mat
val atanh : mat -> mat
val floor : mat -> mat
val ceil : mat -> mat
val round : mat -> mat
val trunc : mat -> mat
val fix : mat -> mat
val modf : mat -> mat * mat
val l1norm : ?axis:int -> ?keep_dims:bool -> mat -> mat
val l1norm' : mat -> elt
val l2norm : ?axis:int -> ?keep_dims:bool -> mat -> mat
val l2norm' : mat -> elt
val l2norm_sqr : ?axis:int -> ?keep_dims:bool -> mat -> mat
val l2norm_sqr' : mat -> elt
val vecnorm : ?axis:int -> ?p:float -> ?keep_dims:bool -> mat -> mat
val vecnorm' : ?p:float -> mat -> elt
val cumsum : ?axis:int -> mat -> mat
val cumprod : ?axis:int -> mat -> mat
val cummin : ?axis:int -> mat -> mat
val cummax : ?axis:int -> mat -> mat
val diff : ?axis:int -> ?n:int -> mat -> mat
val var : ?axis:int -> ?keep_dims:bool -> mat -> mat
val std : ?axis:int -> ?keep_dims:bool -> mat -> mat
val mat2gray : ?amin:elt -> ?amax:elt -> mat -> mat
val lgamma : mat -> mat
val dawsn : mat -> mat
Binary mathematical operations
val add : mat -> mat -> mat
val sub : mat -> mat -> mat
val mul : mat -> mat -> mat
val div : mat -> mat -> mat
val add_scalar : mat -> elt -> mat
val sub_scalar : mat -> elt -> mat
val mul_scalar : mat -> elt -> mat
val div_scalar : mat -> elt -> mat
val scalar_add : elt -> mat -> mat
val scalar_sub : elt -> mat -> mat
val scalar_mul : elt -> mat -> mat
val scalar_div : elt -> mat -> mat
val dot : mat -> mat -> mat
val add_diag : mat -> elt -> mat
val pow : mat -> mat -> mat
val scalar_pow : elt -> mat -> mat
val pow_scalar : mat -> elt -> mat
val min2 : mat -> mat -> mat
val max2 : mat -> mat -> mat
val ssqr' : mat -> elt -> elt
val ssqr_diff' : mat -> mat -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> mat -> mat
val cov : ?b:mat -> a:mat -> mat
val kron : mat -> mat -> mat
val fma : mat -> mat -> mat -> mat
Functions of in-place modification
val create_ : out:mat -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:mat -> unit
val bernoulli_ : ?p:float -> out:mat -> unit
val zeros_ : out:mat -> unit
val ones_ : out:mat -> unit
val sort_ : mat -> unit
val one_hot_ : out:mat -> int -> mat -> unit
val copy_ : out:mat -> mat -> unit
val reshape_ : out:mat -> mat -> unit
val transpose_ : out:mat -> ?axis:int array -> mat -> unit
val sum_ : out:mat -> axis:int -> mat -> unit
val min_ : out:mat -> axis:int -> mat -> unit
val max_ : out:mat -> axis:int -> mat -> unit
val add_ : ?out:mat -> mat -> mat -> unit
val sub_ : ?out:mat -> mat -> mat -> unit
val mul_ : ?out:mat -> mat -> mat -> unit
val div_ : ?out:mat -> mat -> mat -> unit
val pow_ : ?out:mat -> mat -> mat -> unit
val atan2_ : ?out:mat -> mat -> mat -> unit
val hypot_ : ?out:mat -> mat -> mat -> unit
val fmod_ : ?out:mat -> mat -> mat -> unit
val min2_ : ?out:mat -> mat -> mat -> unit
val max2_ : ?out:mat -> mat -> mat -> unit
val add_scalar_ : ?out:mat -> mat -> elt -> unit
val sub_scalar_ : ?out:mat -> mat -> elt -> unit
val mul_scalar_ : ?out:mat -> mat -> elt -> unit
val div_scalar_ : ?out:mat -> mat -> elt -> unit
val pow_scalar_ : ?out:mat -> mat -> elt -> unit
val atan2_scalar_ : ?out:mat -> mat -> elt -> unit
val fmod_scalar_ : ?out:mat -> mat -> elt -> unit
val scalar_add_ : ?out:mat -> elt -> mat -> unit
val scalar_sub_ : ?out:mat -> elt -> mat -> unit
val scalar_mul_ : ?out:mat -> elt -> mat -> unit
val scalar_div_ : ?out:mat -> elt -> mat -> unit
val scalar_pow_ : ?out:mat -> elt -> mat -> unit
val scalar_atan2_ : ?out:mat -> elt -> mat -> unit
val scalar_fmod_ : ?out:mat -> elt -> mat -> unit
val fma_ : ?out:mat -> mat -> mat -> mat -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:mat -> mat -> mat -> unit
val conj_ : ?out:mat -> mat -> unit
val abs_ : ?out:mat -> mat -> unit
val neg_ : ?out:mat -> mat -> unit
val reci_ : ?out:mat -> mat -> unit
val signum_ : ?out:mat -> mat -> unit
val sqr_ : ?out:mat -> mat -> unit
val sqrt_ : ?out:mat -> mat -> unit
val cbrt_ : ?out:mat -> mat -> unit
val exp_ : ?out:mat -> mat -> unit
val exp2_ : ?out:mat -> mat -> unit
val exp10_ : ?out:mat -> mat -> unit
val expm1_ : ?out:mat -> mat -> unit
val log_ : ?out:mat -> mat -> unit
val log2_ : ?out:mat -> mat -> unit
val log10_ : ?out:mat -> mat -> unit
val log1p_ : ?out:mat -> mat -> unit
val sin_ : ?out:mat -> mat -> unit
val cos_ : ?out:mat -> mat -> unit
val tan_ : ?out:mat -> mat -> unit
val asin_ : ?out:mat -> mat -> unit
val acos_ : ?out:mat -> mat -> unit
val atan_ : ?out:mat -> mat -> unit
val sinh_ : ?out:mat -> mat -> unit
val cosh_ : ?out:mat -> mat -> unit
val tanh_ : ?out:mat -> mat -> unit
val asinh_ : ?out:mat -> mat -> unit
val acosh_ : ?out:mat -> mat -> unit
val atanh_ : ?out:mat -> mat -> unit
val floor_ : ?out:mat -> mat -> unit
val ceil_ : ?out:mat -> mat -> unit
val round_ : ?out:mat -> mat -> unit
val trunc_ : ?out:mat -> mat -> unit
val fix_ : ?out:mat -> mat -> unit
val erf_ : ?out:mat -> mat -> unit
val erfc_ : ?out:mat -> mat -> unit
val relu_ : ?out:mat -> mat -> unit
val softplus_ : ?out:mat -> mat -> unit
val softsign_ : ?out:mat -> mat -> unit
val sigmoid_ : ?out:mat -> mat -> unit
val softmax_ : ?out:mat -> ?axis:int -> mat -> unit
val cumsum_ : ?out:mat -> ?axis:int -> mat -> unit
val cumprod_ : ?out:mat -> ?axis:int -> mat -> unit
val cummin_ : ?out:mat -> ?axis:int -> mat -> unit
val cummax_ : ?out:mat -> ?axis:int -> mat -> unit
val dropout_ : ?out:mat -> ?rate:float -> mat -> unit
val elt_equal_ : ?out:mat -> mat -> mat -> unit
val elt_not_equal_ : ?out:mat -> mat -> mat -> unit
val elt_less_ : ?out:mat -> mat -> mat -> unit
val elt_greater_ : ?out:mat -> mat -> mat -> unit
val elt_less_equal_ : ?out:mat -> mat -> mat -> unit
val elt_greater_equal_ : ?out:mat -> mat -> mat -> unit
val elt_equal_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_not_equal_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_less_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_greater_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_less_equal_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_greater_equal_scalar_ : ?out:mat -> mat -> elt -> unit
include Owl_dense_matrix_intf.Real with type elt := elt and type mat := mat
Specific real functions
val i0 : mat -> mat
val i0e : mat -> mat
val i1 : mat -> mat
val i1e : mat -> mat
val iv : v:mat -> mat -> mat
val scalar_iv : v:elt -> mat -> mat
val iv_scalar : v:mat -> elt -> mat
val j0 : mat -> mat
val j1 : mat -> mat
val jv : v:mat -> mat -> mat
val scalar_jv : v:elt -> mat -> mat
val jv_scalar : v:mat -> elt -> mat
val semidef : int -> mat
val min_rows : mat -> (elt * int * int) array
val min_cols : mat -> (elt * int * int) array
val max_rows : mat -> (elt * int * int) array
val max_cols : mat -> (elt * int * int) array
val signum : mat -> mat
val erf : mat -> mat
val erfc : mat -> mat
val logistic : mat -> mat
val relu : mat -> mat
val elu : ?alpha:elt -> mat -> mat
val leaky_relu : ?alpha:elt -> mat -> mat
val softplus : mat -> mat
val softsign : mat -> mat
val softmax : ?axis:int -> mat -> mat
val sigmoid : mat -> mat
val log_sum_exp' : mat -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> mat -> mat
val max_pool : ?padding:Owl_types.padding -> mat -> int array -> int array -> mat
val avg_pool : ?padding:Owl_types.padding -> mat -> int array -> int array -> mat
val atan2 : mat -> mat -> mat
val scalar_atan2 : elt -> mat -> mat
val atan2_scalar : mat -> elt -> mat
val hypot : mat -> mat -> mat
val fmod : mat -> mat -> mat
val fmod_scalar : mat -> elt -> mat
val scalar_fmod : elt -> mat -> mat
val cross_entropy' : mat -> mat -> elt
val clip_by_l2norm : elt -> mat -> mat
val poisson : mu:elt -> int -> int -> mat
include module type of struct include Operator end
include sig ... end
val (+$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (-$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (*$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (/$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val ($+) : 'a -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val ($-) : 'a -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val ($*) : 'a -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val ($/) : 'a -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (!=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (<>) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (>) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (<) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (>=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (<=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
include sig ... end
val (=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (!=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (<>$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (<$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (>$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (<=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (>=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (=.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (!=.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (<>.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (<.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (>.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (<=.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (>=.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (=.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (!=.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (<>.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (<.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (>.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (<=.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (>=.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (=~) : ?eps:float -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (=~$) : ?eps:float -> ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (=~.) : ?eps:float -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (=~.$) : ?eps:float -> ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (%) : (float, 'a) Owl_dense_matrix_generic.t -> (float, 'a) Owl_dense_matrix_generic.t -> (float, 'a) Owl_dense_matrix_generic.t
val (%$) : (float, 'a) Owl_dense_matrix_generic.t -> float -> (float, 'a) Owl_dense_matrix_generic.t
val (**) : (float, 'a) Owl_dense_matrix_generic.t -> (float, 'a) Owl_dense_matrix_generic.t -> (float, 'a) Owl_dense_matrix_generic.t
val ($**) : float -> (float, 'a) Owl_dense_matrix_generic.t -> (float, 'a) Owl_dense_matrix_generic.t
val (**$) : (float, 'a) Owl_dense_matrix_generic.t -> float -> (float, 'a) Owl_dense_matrix_generic.t
val (+=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
val (-=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
val (*=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
val (/=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
val (+$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (-$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (*$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (/$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (@=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (@||) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (.!{;..}) : ('a, 'b) Owl_dense_matrix_generic.t -> Owl_types.index array -> ('a, 'b) Owl_dense_matrix_generic.t
val (.!{;..}<-) : ('a, 'b) Owl_dense_matrix_generic.t -> Owl_types.index array -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
val (.${}) : ('a, 'b) Owl_dense_matrix_generic.t -> int list -> ('a, 'b) Owl_dense_matrix_generic.t
val (.${;..}) : ('a, 'b) Owl_dense_matrix_generic.t -> int list array -> ('a, 'b) Owl_dense_matrix_generic.t
val (.${}<-) : ('a, 'b) Owl_dense_matrix_generic.t -> int list -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
val (.${;..}<-) : ('a, 'b) Owl_dense_matrix_generic.t -> int list array -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
include sig ... end
val (*@) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (.%{}) : ('a, 'b) Owl_dense_matrix_generic.t -> (int * int) -> 'a
val (.%{;..}) : ('a, 'b) Owl_dense_matrix_generic.t -> int array -> 'a
val (.%{}<-) : ('a, 'b) Owl_dense_matrix_generic.t -> (int * int) -> 'a -> unit
val (.%{;..}<-) : ('a, 'b) Owl_dense_matrix_generic.t -> int array -> 'a -> unit
include sig ... end
val (**@) : ('a, 'b) Owl_linalg_generic.t -> float -> ('a, 'b) Owl_linalg_generic.t
val (/@) : ('a, 'b) Owl_linalg_generic.t -> ('a, 'b) Owl_linalg_generic.t -> ('a, 'b) Owl_linalg_generic.t
val mpow : Owl_linalg_d.mat -> float -> Owl_linalg_d.mat
val diag : ?k:int -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
\ No newline at end of file diff --git a/owl/Owl_dense_matrix/Generic/index.html b/owl/Owl_dense_matrix/Generic/index.html deleted file mode 100644 index 891d5ab4a..000000000 --- a/owl/Owl_dense_matrix/Generic/index.html +++ /dev/null @@ -1,402 +0,0 @@ - -Generic (owl.Owl_dense_matrix.Generic)

Module Owl_dense_matrix.Generic

include module type of struct include Owl_dense_matrix_generic end

For the comparison of two complex numbers x and y, Owl uses the following conventions: 1) x and y are equal iff both their real and imaginary parts are equal; 2) x is less than y if the magnitude of x is less than the magnitude of y; in case x and y have the same magnitude, x is less than y if the phase of x is less than the phase of y; 3) the less-or-equal, greater, and greater-or-equal relations are defined on top of the aforementioned conventions.
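As a minimal sketch (illustrative only, not part of Owl's interface), the ordering rule above can be written directly against OCaml's standard Complex module:

(* hypothetical helper: compare complex numbers by magnitude first, then by phase,
   mirroring the convention described above *)
let complex_lt (x : Complex.t) (y : Complex.t) : bool =
  let nx = Complex.norm x and ny = Complex.norm y in
  if nx <> ny then nx < ny else Complex.arg x < Complex.arg y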

The generic module supports operations for the following Bigarray element types: Int8_signed, Int8_unsigned, Int16_signed, Int16_unsigned, Int32, Int64, Float32, Float64, Complex32, Complex64.

Type definition
type ('a, 'b) t = ('a, 'b, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t

N-dimensional array type, i.e. Bigarray Genarray type.

Create matrices
val empty : ('a, 'b) Owl_dense_ndarray_generic.kind -> int -> int -> ('a, 'b) t

empty m n creates an m by n matrix without initialising the values of elements in x.

val create : ('a, 'b) Owl_dense_ndarray_generic.kind -> int -> int -> 'a -> ('a, 'b) t

create m n a creates an m by n matrix and all the elements of x are initialised with the value a.

val init : ('a, 'b) Owl_dense_ndarray_generic.kind -> int -> int -> (int -> 'a) -> ('a, 'b) t

init m n f creates a matrix x of shape m x n, then uses f to initialise the elements in x. The input of f is the 1-dimensional index of the matrix. You need to convert it explicitly if you need a 2-D index; the function Owl_utils.ind can help you.

val init_2d : ('a, 'b) Owl_dense_ndarray_generic.kind -> int -> int -> (int -> int -> 'a) -> ('a, 'b) t

init_2d m n f is almost the same as init, but f receives a 2-D index as input. It is more convenient since you don't have to convert the index yourself, but this also means init_2d is slower than init.
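For illustration, a small sketch using the float64 instance Owl_dense_matrix.D documented earlier on this page (so no kind argument is needed); the values chosen are arbitrary:

module M = Owl_dense_matrix.D
(* init passes the flat, 1-D index: the 2 x 3 matrix gets elements 0., 1., ..., 5. *)
let x = M.init 2 3 (fun i -> float_of_int i)
(* init_2d passes the row and column indices separately *)
let y = M.init_2d 2 3 (fun i j -> float_of_int (10 * i + j))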

val zeros : ('a, 'b) Owl_dense_ndarray_generic.kind -> int -> int -> ('a, 'b) t

zeros m n creates an m by n matrix where all the elements are initialised to zeros.

val ones : ('a, 'b) Owl_dense_ndarray_generic.kind -> int -> int -> ('a, 'b) t

ones m n creates an m by n matrix where all the elements are ones.

val eye : ('a, 'b) Owl_dense_ndarray_generic.kind -> int -> ('a, 'b) t

eye m creates an m by m identity matrix.

val complex : ('a, 'b) Owl_dense_ndarray_generic.kind -> ('c, 'd) Owl_dense_ndarray_generic.kind -> ('a, 'b) t -> ('a, 'b) t -> ('c, 'd) t

complex re im constructs a complex ndarray/matrix from re and im. re and im contain the real and imaginary part of x respectively.

Note that both re and im can be complex but must have the same type. The real part of re will be the real part of x and the imaginary part of im will be the imaginary part of x.

val polar : ('a, 'b) Owl_dense_ndarray_generic.kind -> ('c, 'd) Owl_dense_ndarray_generic.kind -> ('a, 'b) t -> ('a, 'b) t -> ('c, 'd) t

polar rho theta constructs a complex ndarray/matrix from the polar coordinates rho and theta. rho contains the magnitudes and theta contains the phase angles. Note that the behaviour is undefined if rho has negative elements or theta has infinite elements.

val unit_basis : ('a, 'b) Owl_dense_ndarray_generic.kind -> int -> int -> ('a, 'b) t

unit_basis k n i returns a unit basis vector with the ith element set to 1.

val sequential : ('a, 'b) Owl_dense_ndarray_generic.kind -> ?a:'a -> ?step:'a -> int -> int -> ('a, 'b) t

sequential ~a ~step m n creates an m by n matrix. The elements in x are initialised sequentially from ~a and increased by ~step.

The default value of ~a is zero whilst the default value of ~step is one.

val uniform : ('a, 'b) Owl_dense_ndarray_generic.kind -> ?a:'a -> ?b:'a -> int -> int -> ('a, 'b) t

uniform m n creates an m by n matrix where all the elements follow a uniform distribution on the (0,1) interval. uniform ~a:lo ~b:hi m n adjusts the interval to (lo,hi).

val gaussian : ('a, 'b) Owl_dense_ndarray_generic.kind -> ?mu:'a -> ?sigma:'a -> int -> int -> ('a, 'b) t

gaussian m n creates an m by n matrix where all the elements in x follow a Gaussian distribution with the specified mu and sigma. By default sigma = 1.
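A short sketch of these constructors, again assuming the float64 instance Owl_dense_matrix.D documented earlier (the optional-argument names follow the signatures above):

module M = Owl_dense_matrix.D
let a = M.sequential ~a:0. ~step:2. 2 3   (* elements 0 2 4 / 6 8 10 *)
let b = M.uniform ~a:0. ~b:10. 2 3        (* elements drawn uniformly from (0, 10) *)
let c = M.gaussian ~mu:1. ~sigma:0.5 2 3  (* Gaussian with mean 1 and standard deviation 0.5 *)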

val poisson : ('a, 'b) Owl_dense_ndarray_generic.kind -> mu:float -> int -> int -> ('a, 'b) t

poisson m n creates an m by n matrix where all the elements in x follow a Poisson distribution with specified rate mu.

val semidef : (float, 'b) Owl_dense_ndarray_generic.kind -> int -> (float, 'b) t

semidef n returns a random n by n positive semi-definite matrix.

val linspace : ('a, 'b) Owl_dense_ndarray_generic.kind -> 'a -> 'a -> int -> ('a, 'b) t

linspace a b n linearly divides the interval [a,b] into n pieces and returns them as a 1 by n row vector. E.g., linspace 0. 5. 6 will create a row vector [0;1;2;3;4;5].

val logspace : ('a, 'b) Owl_dense_ndarray_generic.kind -> ?base:float -> 'a -> 'a -> int -> ('a, 'b) t

logspace base a b n ... the default value of base is e.

val meshgrid : ('a, 'b) Owl_dense_ndarray_generic.kind -> 'a -> 'a -> 'a -> 'a -> int -> int -> ('a, 'b) t * ('a, 'b) t

meshgrid a1 b1 a2 b2 n1 n2 is similar to the meshgrid function in Matlab. It returns two matrices x and y where the row vectors in x are linearly spaced between [a1,b1] by n1 whilst the column vectors in y are linearly spaced between [a2,b2] by n2.

val meshup : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t * ('a, 'b) t

meshup x y creates mesh grids by using two row vectors x and y.

val bernoulli : ('a, 'b) Owl_dense_ndarray_generic.kind -> ?p:float -> int -> int -> ('a, 'b) t

bernoulli k ~p:0.3 m n

val diagm : ?k:int -> ('a, 'b) t -> ('a, 'b) t

diagm k v creates a diagonal matrix using the elements in v as diagonal values. k specifies the main diagonal index. If k > 0 then it is above the main diagonal, if k < 0 then it is below the main diagonal. This function is the same as the diag function in Matlab.
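For example, a brief sketch with the float64 instance (the values are arbitrary):

module M = Owl_dense_matrix.D
let v = M.diagm (M.sequential ~a:1. 1 3)        (* 3 x 3 matrix with 1 2 3 on the main diagonal *)
let w = M.diagm ~k:1 (M.sequential ~a:1. 1 3)   (* same values placed one diagonal above the main one *)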

val triu : ?k:int -> ('a, 'b) t -> ('a, 'b) t

triu k x returns the element on and above the kth diagonal of x. k = 0 is the main diagonal, k > 0 is above the main diagonal, and k < 0 is below the main diagonal.

val tril : ?k:int -> ('a, 'b) t -> ('a, 'b) t

tril k x returns the element on and below the kth diagonal of x. k = 0 is the main diagonal, k > 0 is above the main diagonal, and k < 0 is below the main diagonal.

val symmetric : ?upper:bool -> ('a, 'b) t -> ('a, 'b) t

symmetric ~upper x creates a symmetric matrix using either upper or lower triangular part of x. If upper is true then it uses the upper part, if upper is false, then symmetric uses the lower part. By default upper is true.

val hermitian : ?upper:bool -> (Stdlib.Complex.t, 'a) t -> (Stdlib.Complex.t, 'a) t

hermitian ~upper x creates a Hermitian matrix based on x. By default, the upper triangular part is used for creating the Hermitian matrix, but you can use the lower part by setting upper=false.

val bidiagonal : ?upper:bool -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

bidiagonal ~upper dv ev creates a bidiagonal matrix using dv and ev. Both dv and ev are row vectors. dv is the main diagonal. If upper is true then ev is the superdiagonal; if upper is false then ev is the subdiagonal. By default, upper is true.

NOTE: because the diagonal elements of a Hermitian matrix must be real, the hermitian function sets the imaginary part of the diagonal elements to zero by default. In other words, if the diagonal elements of x have non-zero imaginary parts, those imaginary parts will be dropped without a warning.

val toeplitz : ?c:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

toeplitz ~c r generates a Toeplitz matrix using r and c. Both r and c are row vectors of the same length. If the first element of c differs from the first element of r, r's first element will be used.

Note: 1) If c is not passed in, then c = r will be used. 2) If c is not passed in and r is complex, then c = conj r will be used. 3) If r and c have different lengths, then the result is a rectangular matrix.

val hankel : ?r:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

hankel ~r c generates a hankel matrix using r and c. c will be the first column and r will be the last row of the returned matrix.

Note: 1) If only c is passed in, the elements below the anti-diagonal are zero. 2) If the last element of c is different from the first element of r then the first element of c prevails. 3) c and r can have different lengths; the result will be a rectangular matrix.

val hadamard : ('a, 'b) Owl_dense_ndarray_generic.kind -> int -> ('a, 'b) t

hadamard k n constructs a Hadamard matrix of order n. For a Hadamard matrix H, we have H'*H = n*I. Currently, this function handles only the cases where n, n/12, or n/20 is a power of 2.

val magic : ('a, 'b) Owl_dense_ndarray_generic.kind -> int -> ('a, 'b) t

magic k n constructs an n x n magic square matrix x. The elements in x are consecutive numbers increasing from 1 to n^2. n must be >= 3.

There are three different algorithms, depending on whether n is odd, singly even, or doubly even.

Obtain basic properties
val shape : ('a, 'b) t -> int * int

If x is an m by n matrix, shape x returns (m,n), i.e., the size of two dimensions of x.

val row_num : ('a, 'b) t -> int

row_num x returns the number of rows in matrix x.

val col_num : ('a, 'b) t -> int

col_num x returns the number of columns in matrix x.

val numel : ('a, 'b) t -> int

numel x returns the number of elements in matrix x. It is equivalent to (row_num x) * (col_num x).
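A quick sketch of these property functions together (assuming M = Owl.Dense.Matrix.Generic; the 3 by 4 size is just an example):

module M = Owl.Dense.Matrix.Generic

let () =
  let x = M.zeros Bigarray.Float64 3 4 in
  assert (M.shape x = (3, 4));
  assert (M.row_num x = 3 && M.col_num x = 4);
  assert (M.numel x = 12)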

val nnz : ('a, 'b) t -> int

nnz x returns the number of non-zero elements in x.

val density : ('a, 'b) t -> float

density x returns the percentage of non-zero elements in x.

val size_in_bytes : ('a, 'b) t -> int

size_in_bytes x returns the size of x in bytes in memory.

val same_shape : ('a, 'b) t -> ('a, 'b) t -> bool

same_shape x y returns true if two matrices have the same shape.

val same_data : ('a, 'b) t -> ('a, 'b) t -> bool

Refer to :doc:`owl_dense_ndarray_generic`.

val kind : ('a, 'b) t -> ('a, 'b) Owl_dense_ndarray_generic.kind

kind x returns the type of matrix x.

Manipulate a matrix
val get : ('a, 'b) t -> int -> int -> 'a

get x i j returns the value of element (i,j) of x. The shorthand for get x i j is x.{i,j}

val set : ('a, 'b) t -> int -> int -> 'a -> unit

set x i j a sets the element (i,j) of x to value a. The shorthand for set x i j a is x.{i,j} <- a
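A minimal sketch of get and set (assuming M = Owl.Dense.Matrix.Generic; the values are illustrative only):

module M = Owl.Dense.Matrix.Generic

let () =
  let x = M.zeros Bigarray.Float64 2 2 in
  M.set x 0 1 3.5;                     (* element (0,1) becomes 3.5 *)
  assert (M.get x 0 1 = 3.5)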

val get_index : ('a, 'b) t -> int array array -> 'a array

get_index x i returns an array of element values specified by the indices i. The length of array i equals the number of dimensions of x. The arrays in i must have the same length, and each represents the indices in that dimension.

E.g., [| [|1;2|]; [|3;4|] |] returns the value of elements at position (1,3) and (2,4) respectively.

val set_index : ('a, 'b) t -> int array array -> 'a array -> unit

set_index sets the value of elements in x according to the indices specified by i. The length of array i equals the number of dimensions of x. The arrays in i must have the same length, and each represents the indices in that dimension.

val get_fancy : Owl_types.index list -> ('a, 'b) t -> ('a, 'b) t

get_fancy s x returns a copy of the slice in x. The slice is defined by s, which is a list of Owl_types.index values. Please refer to the same function in the Owl_dense_ndarray_generic documentation for more details.

val set_fancy : Owl_types.index list -> ('a, 'b) t -> ('a, 'b) t -> unit

set_fancy axis x y sets the slice defined by axis in x according to the values in y. y must have the same shape as the one defined by axis.

About the slice definition of axis, please refer to slice function.

val get_fancy_ext : Owl_types.index array -> ('a, 'b) t -> ('a, 'b) t

This function is used for the extended indexing operator available since OCaml 4.10.0. The indexing and slicing syntax becomes much lighter.

val set_fancy_ext : Owl_types.index array -> ('a, 'b) t -> ('a, 'b) t -> unit

This function is used for the extended indexing operator available since OCaml 4.10.0. The indexing and slicing syntax becomes much lighter.

val get_slice : int list list -> ('a, 'b) t -> ('a, 'b) t

get_slice axis x aims to provide a simpler version of get_fancy. This function assumes that every list element in the passed-in int list list represents a range, i.e., the R constructor.

E.g., [[];[0;3];[0]] is equivalent to [R []; R [0;3]; R [0]].
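As a sketch, slicing a matrix with get_slice might look like this (assuming M = Owl.Dense.Matrix.Generic; the sizes and ranges are illustrative):

module M = Owl.Dense.Matrix.Generic

let () =
  let x = M.sequential Bigarray.Float64 4 5 in
  (* all rows, columns 0 to 2 inclusive *)
  let y = M.get_slice [ []; [0; 2] ] x in
  assert (M.shape y = (4, 3))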

val set_slice : int list list -> ('a, 'b) t -> ('a, 'b) t -> unit

set_slice axis x y aims to provide a simpler version of set_fancy. This function assumes that every list element in the passed-in int list list represents a range, i.e., the R constructor.

E.g., [[];[0;3];[0]] is equivalent to [R []; R [0;3]; R [0]].

val get_slice_ext : int list array -> ('a, 'b) t -> ('a, 'b) t

Please refer to Ndarray document.

val set_slice_ext : int list array -> ('a, 'b) t -> ('a, 'b) t -> unit

Please refer to Ndarray document.

val row : ('a, 'b) t -> int -> ('a, 'b) t

row x i returns row i of x. Note: Unlike col, the return value is simply a view onto the original row in x, so modifying the returned row also alters x.

The function supports negative indices.
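The view semantics described above can be sketched as follows (assuming M = Owl.Dense.Matrix.Generic; the values are illustrative):

module M = Owl.Dense.Matrix.Generic

let () =
  let x = M.zeros Bigarray.Float64 3 3 in
  let r = M.row x 1 in        (* a view onto row 1, not a copy *)
  M.fill r 7.;                (* writing through the view also updates x *)
  assert (M.get x 1 0 = 7.)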

val col : ('a, 'b) t -> int -> ('a, 'b) t

col x j returns column j of x. Note: Unlike row, the return value is a copy of the original column in x.

The function supports negative indices.

val rows : ('a, 'b) t -> int array -> ('a, 'b) t

rows x a returns the rows (defined in an int array a) of x. The returned rows will be combined into a new dense matrix. The order of rows in the new matrix is the same as that in the array a.

The function supports negative indices.

val cols : ('a, 'b) t -> int array -> ('a, 'b) t

Similar to rows, cols x a returns the columns (specified in array a) of x in a new dense matrix.

The function supports negative indices.

val resize : ?head:bool -> ('a, 'b) t -> int array -> ('a, 'b) t

resize x s please refer to the Ndarray document.

val reshape : ('a, 'b) t -> int array -> ('a, 'b) t

reshape x s returns a new m by n matrix from the m' by n' matrix x. Note that (m * n) must be equal to (m' * n'), and the returned matrix shares the same memory with the original x.

val flatten : ('a, 'b) t -> ('a, 'b) t

flatten x reshapes x into a 1 by n row vector without making a copy. Therefore the returned value shares the same memory space with the original x.

val reverse : ('a, 'b) t -> ('a, 'b) t

reverse x reverses the order of all elements in the flattened x and returns the result in a new matrix. The original x remains intact.

val flip : ?axis:int -> ('a, 'b) t -> ('a, 'b) t

flip ~axis x flips a matrix/ndarray along axis. By default axis = 0. The result is returned in a new matrix/ndarray, so the original x remains intact.

val rotate : ('a, 'b) t -> int -> ('a, 'b) t

rotate x d rotates x clockwise by d degrees. d must be a multiple of 90, otherwise the function will fail. If x is an n-dimensional array, then the function rotates the plane formed by the first and second dimensions.

val reset : ('a, 'b) t -> unit

reset x resets all the elements of x to zero value.

val fill : ('a, 'b) t -> 'a -> unit

fill x a fills x with the value a.

val copy : ('a, 'b) t -> ('a, 'b) t

copy x returns a copy of matrix x.

val copy_row_to : ('a, 'b) t -> ('a, 'b) t -> int -> unit

copy_row_to v x i copies a 1 by n row vector v to the ith row in an m by n matrix x.

val copy_col_to : ('a, 'b) t -> ('a, 'b) t -> int -> unit

copy_col_to v x j copies an m by 1 column vector v to the jth column in an m by n matrix x.

val concat_vertical : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

concat_vertical x y concatenates two matrices x and y vertically, therefore their column numbers must be the same.

The associated operator is @=, please refer to :doc:`owl_operator`.

val concat_horizontal : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

concat_horizontal x y concatenates two matrices x and y horizontally, therefore their row numbers must be the same.

The associated operator is @||, please refer to :doc:`owl_operator`.

val concat_vh : ('a, 'b) t array array -> ('a, 'b) t

concat_vh is used to assemble small parts of matrices into a bigger one. E.g. [| [|a; b; c|]; [|d; e; f|]; [|g; h; i|] |] will be concatenated into one big matrix in which a, b, c form the first block row, d, e, f the second, and g, h, i the third.

Please refer to :doc:`owl_dense_ndarray_generic`. for details.

val concatenate : ?axis:int -> ('a, 'b) t array -> ('a, 'b) t

concatenate ~axis:1 x concatenates an array of matrices along the second dimension. The matrices in x must have the same shape except along the dimension specified by axis. The default value of axis is 0, i.e., the lowest dimension of a matrix, i.e., rows.

val split : ?axis:int -> int array -> ('a, 'b) t -> ('a, 'b) t array

split ~axis parts x splits an ndarray x into parts along the specified axis. This function is the inverse operation of concatenate. The elements in parts must sum up to the size of the specified axis of x.
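For example, a minimal sketch of split along the rows (assuming M = Owl.Dense.Matrix.Generic; the 5 by 4 size and the parts are illustrative):

module M = Owl.Dense.Matrix.Generic

let () =
  let x = M.sequential Bigarray.Float64 5 4 in
  let parts = M.split ~axis:0 [| 2; 3 |] x in       (* 2 + 3 = 5 rows *)
  assert (Array.length parts = 2);
  assert (M.shape parts.(0) = (2, 4));
  assert (M.shape parts.(1) = (3, 4))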

val split_vh : (int * int) array array -> ('a, 'b) t -> ('a, 'b) t array array

Please refer to :doc:`owl_dense_ndarray_generic`. for details.

val transpose : ('a, 'b) t -> ('a, 'b) t

transpose x transposes an m by n matrix to n by m one.

val ctranspose : ('a, 'b) t -> ('a, 'b) t

ctranspose x performs conjugate transpose of a complex matrix x. If x is a real matrix, then ctranspose x is equivalent to transpose x.

val diag : ?k:int -> ('a, 'b) t -> ('a, 'b) t

diag k x returns the kth diagonal elements of x. k > 0 means above the main diagonal and k < 0 means below the main diagonal.

val swap_rows : ('a, 'b) t -> int -> int -> unit

swap_rows x i i' swaps the row i with row i' of x.

val swap_cols : ('a, 'b) t -> int -> int -> unit

swap_cols x j j' swaps the column j with column j' of x.

val tile : ('a, 'b) t -> int array -> ('a, 'b) t

tile x a provides the exact behaviour as numpy.tile function.

val repeat : ('a, 'b) t -> int array -> ('a, 'b) t

repeat x a repeats the elements of x according to the repetitions specified by a.

val pad : ?v:'a -> int list list -> ('a, 'b) t -> ('a, 'b) t

pad ~v:0. [[1;1]] x

val dropout : ?rate:float -> ('a, 'b) t -> ('a, 'b) t

dropout ~rate:0.3 x drops out 30% of the elements in x, i.e., sets their values to zero.

val top : ('a, 'b) t -> int -> int array array

top x n returns the indices of n greatest values of x. The indices are arranged according to the corresponding element values, from the greatest one to the smallest one.

val bottom : ('a, 'b) t -> int -> int array array

bottom x n returns the indices of n smallest values of x. The indices are arranged according to the corresponding element values, from the smallest one to the greatest one.

val sort : ('a, 'b) t -> ('a, 'b) t

sort x performs quicksort of the elements in x. A new copy is returned as the result; the original x remains intact. If you want to perform in-place sorting, please use `sort_` instead.

val argsort : ('a, 'b) t -> (int64, Stdlib.Bigarray.int64_elt) t

argsort x returns the indices with which the elements in x are sorted in increasing order. Note that the returned index ndarray has the same shape as that of x, and the indices are 1D indices.

Iteration functions
val iteri : (int -> 'a -> unit) -> ('a, 'b) t -> unit

iteri f x iterates over all the elements in x and applies the user defined function f : int -> 'a -> unit. f i v takes two parameters: i is the 1d index of the current element and v is its value.

val iter : ('a -> unit) -> ('a, 'b) t -> unit

iter f x is the same as iteri f x except that the index of the current element is not passed to the function f : 'a -> unit.

val mapi : (int -> 'a -> 'a) -> ('a, 'b) t -> ('a, 'b) t

mapi f x maps each element in x to a new value by applying f : int -> 'a -> 'a. The first parameter is the 1d index of the element, and the second parameter is its value.

val map : ('a -> 'a) -> ('a, 'b) t -> ('a, 'b) t

map f x is similar to mapi f x except that the index of the current element is not passed to the function f : 'a -> 'a.
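A small sketch of map (assuming M = Owl.Dense.Matrix.Generic; the squaring function is just an example):

module M = Owl.Dense.Matrix.Generic

let () =
  let x = M.sequential Bigarray.Float64 2 3 in        (* [[0;1;2];[3;4;5]] *)
  let y = M.map (fun a -> a *. a) x in                (* element-wise square *)
  assert (M.get y 1 2 = 25.)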

val foldi : ?axis:int -> (int -> 'a -> 'a -> 'a) -> 'a -> ('a, 'b) t -> ('a, 'b) t

foldi ~axis f a x folds (or reduces) the elements in x from the left along the specified axis using the passed-in function f. a is the initial element, and in f i acc b, acc is the accumulator and b is one of the elements in x along the same axis. Note that i is the 1d index of b.

val fold : ?axis:int -> ('a -> 'a -> 'a) -> 'a -> ('a, 'b) t -> ('a, 'b) t

Similar to foldi, except that the index of an element is not passed to f.

val scani : ?axis:int -> (int -> 'a -> 'a -> 'a) -> ('a, 'b) t -> ('a, 'b) t

scani ~axis f x scans x along the specified axis using the passed-in function f. f i acc a returns an updated acc which will be passed to the next call of f. This function can be used to implement accumulative operations such as the sum and prod functions. Note that i is the 1d index of a in x.

val scan : ?axis:int -> ('a -> 'a -> 'a) -> ('a, 'b) t -> ('a, 'b) t

Similar to scani, except that the index of an element is not passed to f.

val filteri : (int -> 'a -> bool) -> ('a, 'b) t -> int array

filteri f x uses f : int -> 'a -> bool to filter out certain elements in x. An element will be included if f returns true. The returned result is an array of 1d indices of the selected elements.

val filter : ('a -> bool) -> ('a, 'b) t -> int array

Similar to filteri, but the indices of the elements are not passed to the function f : 'a -> bool.

val iteri_2d : (int -> int -> 'a -> unit) -> ('a, 'b) t -> unit

Similar to `iteri` but 2d indices (i,j) are passed to the user function.

val mapi_2d : (int -> int -> 'a -> 'a) -> ('a, 'b) t -> ('a, 'b) t

Similar to `mapi` but 2d indices (i,j) are passed to the user function.

val foldi_2d : ?axis:int -> (int -> int -> 'a -> 'a -> 'a) -> 'a -> ('a, 'b) t -> ('a, 'b) t

Similar to `foldi` but 2d indices (i,j) are passed to the user function.

val scani_2d : ?axis:int -> (int -> int -> 'a -> 'a -> 'a) -> ('a, 'b) t -> ('a, 'b) t

Similar to `scani` but 2d indices (i,j) are passed to the user function.

val filteri_2d : (int -> int -> 'a -> bool) -> ('a, 'b) t -> (int * int) array

Similar to `filteri` but 2d indices (i,j) are returned.

val iter2i_2d : (int -> int -> 'a -> 'c -> unit) -> ('a, 'b) t -> ('c, 'd) t -> unit

Similar to `iter2i` but 2d indices (i,j) are passed to the user function.

val map2i_2d : (int -> int -> 'a -> 'a -> 'a) -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

Similar to `map2i` but 2d indices (i,j) are passed to the user function.

val iter2i : (int -> 'a -> 'b -> unit) -> ('a, 'c) t -> ('b, 'd) t -> unit

Similar to iteri but applies to two matrices x and y. Both x and y must have the same shape.

val iter2 : ('a -> 'b -> unit) -> ('a, 'c) t -> ('b, 'd) t -> unit

Similar to iter2i, except that the index is not passed to f.

val map2i : (int -> 'a -> 'a -> 'a) -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

map2i f x y applies f to two elements of the same position in both x and y. Note that 1d index is passed to function f.

val map2 : ('a -> 'a -> 'a) -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

map2 f x y is similar to map2i f x y except the index is not passed.

val iteri_rows : (int -> ('a, 'b) t -> unit) -> ('a, 'b) t -> unit

iteri_rows f x iterates every row in x and applies function f : int -> mat -> unit to each of them.

val iter_rows : (('a, 'b) t -> unit) -> ('a, 'b) t -> unit

Similar to iteri_rows except row number is not passed to f.

val iter2i_rows : (int -> ('a, 'b) t -> ('a, 'b) t -> unit) -> ('a, 'b) t -> ('a, 'b) t -> unit

iter2i_rows f x y iterates over the rows of two matrices x and y, passing the row index to f.

val iter2_rows : (('a, 'b) t -> ('a, 'b) t -> unit) -> ('a, 'b) t -> ('a, 'b) t -> unit

Similar to iter2i_rows but without passing in indices.

val iteri_cols : (int -> ('a, 'b) t -> unit) -> ('a, 'b) t -> unit

iteri_cols f x iterates every column in x and applies function f : int -> mat -> unit to each of them. Column number is passed to f as the first parameter.

val iter_cols : (('a, 'b) t -> unit) -> ('a, 'b) t -> unit

Similar to iteri_cols except col number is not passed to f.

val filteri_rows : (int -> ('a, 'b) t -> bool) -> ('a, 'b) t -> int array

filteri_rows f x uses function f : int -> mat -> bool to check each row in x, then returns an int array containing the indices of those rows which satisfy the function f.

val filter_rows : (('a, 'b) t -> bool) -> ('a, 'b) t -> int array

Similar to filteri_rows except that the row indices are not passed to f.

val filteri_cols : (int -> ('a, 'b) t -> bool) -> ('a, 'b) t -> int array

filteri_cols f x uses function f : int -> mat -> bool to check each column in x, then returns an int array containing the indices of those columns which satisfy the function f.

val filter_cols : (('a, 'b) t -> bool) -> ('a, 'b) t -> int array

Similar to filteri_cols except that the column indices are not passed to f.

val fold_rows : ('c -> ('a, 'b) t -> 'c) -> 'c -> ('a, 'b) t -> 'c

fold_rows f a x folds all the rows in x using function f. The order of folding is from the first row to the last one.

val fold_cols : ('c -> ('a, 'b) t -> 'c) -> 'c -> ('a, 'b) t -> 'c

fold_cols f a x folds all the columns in x using function f. The order of folding is from the first column to the last one.

val mapi_rows : (int -> ('a, 'b) t -> 'c) -> ('a, 'b) t -> 'c array

mapi_rows f x maps every row in x to a type 'a value by applying function f : int -> mat -> 'a to each of them. The result is an array of all the returned values.

val map_rows : (('a, 'b) t -> 'c) -> ('a, 'b) t -> 'c array

Similar to mapi_rows except row number is not passed to f.

val mapi_cols : (int -> ('a, 'b) t -> 'c) -> ('a, 'b) t -> 'c array

mapi_cols f x maps every column in x to a type 'a value by applying function f : int -> mat -> 'a.

val map_cols : (('a, 'b) t -> 'c) -> ('a, 'b) t -> 'c array

Similar to mapi_cols except column number is not passed to f.

val mapi_by_row : int -> (int -> ('a, 'b) t -> ('a, 'b) t) -> ('a, 'b) t -> ('a, 'b) t

mapi_by_row d f x applies f to each row of an m by n matrix x, then uses the returned d dimensional row vectors to assemble a new m by d matrix.

val map_by_row : int -> (('a, 'b) t -> ('a, 'b) t) -> ('a, 'b) t -> ('a, 'b) t

map_by_row d f x is similar to mapi_by_row except that the row indices are not passed to f.

val mapi_by_col : int -> (int -> ('a, 'b) t -> ('a, 'b) t) -> ('a, 'b) t -> ('a, 'b) t

mapi_by_col d f x applies f to each column of an m by n matrix x, then uses the returned d dimensional column vectors to assemble a new d by n matrix.

val map_by_col : int -> (('a, 'b) t -> ('a, 'b) t) -> ('a, 'b) t -> ('a, 'b) t

map_by_col d f x is similar to mapi_by_col except that the column indices are not passed to f.

val mapi_at_row : (int -> 'a -> 'a) -> ('a, 'b) t -> int -> ('a, 'b) t

mapi_at_row f x i creates a new matrix by applying function f only to the ith row in matrix x.

val map_at_row : ('a -> 'a) -> ('a, 'b) t -> int -> ('a, 'b) t

map_at_row f x i is similar to mapi_at_row except that the coordinates of an element are not passed to f.

val mapi_at_col : (int -> 'a -> 'a) -> ('a, 'b) t -> int -> ('a, 'b) t

mapi_at_col f x j creates a new matrix by applying function f only to the jth column in matrix x.

val map_at_col : ('a -> 'a) -> ('a, 'b) t -> int -> ('a, 'b) t

map_at_col f x i is similar to mapi_at_col except that the coordinates of an element are not passed to f.

Examination & Comparison
val exists : ('a -> bool) -> ('a, 'b) t -> bool

exists f x checks all the elements in x using f. If at least one element satisfies f then the function returns true otherwise false.

val not_exists : ('a -> bool) -> ('a, 'b) t -> bool

not_exists f x checks all the elements in x, the function returns true only if all the elements fail to satisfy f : float -> bool.

val for_all : ('a -> bool) -> ('a, 'b) t -> bool

for_all f x checks all the elements in x, the function returns true if and only if all the elements pass the check of function f.

val is_zero : ('a, 'b) t -> bool

is_zero x returns true if all the elements in x are zeros.

val is_positive : ('a, 'b) t -> bool

is_positive x returns true if all the elements in x are positive.

val is_negative : ('a, 'b) t -> bool

is_negative x returns true if all the elements in x are negative.

val is_nonpositive : ('a, 'b) t -> bool

is_nonpositive x returns true if all the elements in x are non-positive.

val is_nonnegative : ('a, 'b) t -> bool

is_nonnegative x returns true if all the elements in x are non-negative.

val is_normal : ('a, 'b) t -> bool

is_normal x returns true if all the elements in x are normal float numbers, i.e., not NaN, not INF, not SUBNORMAL. Please refer to

https://www.gnu.org/software/libc/manual/html_node/Floating-Point-Classes.html https://www.gnu.org/software/libc/manual/html_node/Infinity-and-NaN.html#Infinity-and-NaN

val not_nan : ('a, 'b) t -> bool

not_nan x returns false if there is any NaN element in x. Otherwise, the function returns true indicating all the numbers in x are not NaN.

val not_inf : ('a, 'b) t -> bool

not_inf x returns false if there is any positive or negative INF element in x. Otherwise, the function returns true.

val equal : ('a, 'b) t -> ('a, 'b) t -> bool

equal x y returns true if two matrices x and y are equal.

val not_equal : ('a, 'b) t -> ('a, 'b) t -> bool

not_equal x y returns true if at least one element in x is not equal to the corresponding element in y.

val greater : ('a, 'b) t -> ('a, 'b) t -> bool

greater x y returns true if all the elements in x are greater than the corresponding elements in y.

val less : ('a, 'b) t -> ('a, 'b) t -> bool

less x y returns true if all the elements in x are smaller than the corresponding elements in y.

val greater_equal : ('a, 'b) t -> ('a, 'b) t -> bool

greater_equal x y returns true if all the elements in x are not smaller than the corresponding elements in y.

val less_equal : ('a, 'b) t -> ('a, 'b) t -> bool

less_equal x y returns true if all the elements in x are not greater than the corresponding elements in y.

val elt_equal : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

elt_equal x y performs element-wise = comparison of x and y. Assume that a is an element from x and b is the corresponding element from y at the same position. The function returns another binary (0 and 1) ndarray/matrix wherein 1 indicates a = b.

val elt_not_equal : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

elt_not_equal x y performs element-wise != comparison of x and y. Assume that a is an element from x and b is the corresponding element from y at the same position. The function returns another binary (0 and 1) ndarray/matrix wherein 1 indicates a <> b.

val elt_less : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

elt_less x y performs element-wise < comparison of x and y. Assume that a is an element from x and b is the corresponding element from y at the same position. The function returns another binary (0 and 1) ndarray/matrix wherein 1 indicates a < b.

val elt_greater : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

elt_greater x y performs element-wise > comparison of x and y. Assume that a is an element from x and b is the corresponding element from y at the same position. The function returns another binary (0 and 1) ndarray/matrix wherein 1 indicates a > b.

val elt_less_equal : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

elt_less_equal x y performs element-wise <= comparison of x and y. Assume that a is an element from x and b is the corresponding element from y at the same position. The function returns another binary (0 and 1) ndarray/matrix wherein 1 indicates a <= b.

val elt_greater_equal : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

elt_greater_equal x y performs element-wise >= comparison of x and y. Assume that a is an element from x and b is the corresponding element from y at the same position. The function returns another binary (0 and 1) ndarray/matrix wherein 1 indicates a >= b.

val equal_scalar : ('a, 'b) t -> 'a -> bool

equal_scalar x a checks if all the elements in x are equal to a. The function returns true iff for every element b in x, b = a.

val not_equal_scalar : ('a, 'b) t -> 'a -> bool

not_equal_scalar x a checks if all the elements in x are not equal to a. The function returns true iff for every element b in x, b <> a.

val less_scalar : ('a, 'b) t -> 'a -> bool

less_scalar x a checks if all the elements in x are less than a. The function returns true iff for every element b in x, b < a.

val greater_scalar : ('a, 'b) t -> 'a -> bool

greater_scalar x a checks if all the elements in x are greater than a. The function returns true iff for every element b in x, b > a.

val less_equal_scalar : ('a, 'b) t -> 'a -> bool

less_equal_scalar x a checks if all the elements in x are less or equal to a. The function returns true iff for every element b in x, b <= a.

val greater_equal_scalar : ('a, 'b) t -> 'a -> bool

greater_equal_scalar x a checks if all the elements in x are greater or equal to a. The function returns true iff for every element b in x, b >= a.

val elt_equal_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

elt_equal_scalar x a performs element-wise = comparison of x and a. Assume that b is one element from x. The function returns another binary (0 and 1) ndarray/matrix wherein 1 at the corresponding position indicates a = b, otherwise 0.

val elt_not_equal_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

elt_not_equal_scalar x a performs element-wise != comparison of x and a. Assume that b is one element from x. The function returns another binary (0 and 1) ndarray/matrix wherein 1 at the corresponding position indicates a <> b, otherwise 0.

val elt_less_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

elt_less_scalar x a performs element-wise < comparison of x and a. Assume that b is one element from x. The function returns another binary (0 and 1) ndarray/matrix wherein 1 at the corresponding position indicates a < b, otherwise 0.

val elt_greater_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

elt_greater_scalar x a performs element-wise > comparison of x and a. Assume that b is one element from x. The function returns another binary (0 and 1) ndarray/matrix wherein 1 at the corresponding position indicates a > b, otherwise 0.

val elt_less_equal_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

elt_less_equal_scalar x a performs element-wise <= comparison of x and a. Assume that b is one element from x. The function returns another binary (0 and 1) ndarray/matrix wherein 1 at the corresponding position indicates a <= b, otherwise 0.

val elt_greater_equal_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

elt_greater_equal_scalar x a performs element-wise >= comparison of x and a. Assume that b is one element from x. The function returns another binary (0 and 1) ndarray/matrix wherein 1 at the corresponding position indicates a >= b, otherwise 0.

val approx_equal : ?eps:float -> ('a, 'b) t -> ('a, 'b) t -> bool

approx_equal ~eps x y returns true if x and y are approximately equal, i.e., for any two elements a from x and b from y, we have abs (a - b) < eps.

Note: the threshold check is exclusive for passed in eps.

val approx_equal_scalar : ?eps:float -> ('a, 'b) t -> 'a -> bool

approx_equal_scalar ~eps x a returns true if all the elements in x are approximately equal to a, i.e., abs (x - a) < eps. For complex numbers, the eps applies to both the real and imaginary parts.

Note: the threshold check is exclusive for the passed in eps.

val approx_elt_equal : ?eps:float -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

approx_elt_equal ~eps x y compares the element-wise equality of x and y, then returns another binary (i.e., 0 and 1) ndarray/matrix wherein 1 indicates that two corresponding elements a from x and b from y are considered as approximately equal, namely abs (a - b) < eps.

val approx_elt_equal_scalar : ?eps:float -> ('a, 'b) t -> 'a -> ('a, 'b) t

approx_elt_equal_scalar ~eps x a compares all the elements of x to a scalar value a, then returns another binary (i.e., 0 and 1) ndarray/matrix wherein 1 indicates that the element b from x is considered as approximately equal to a, namely abs (a - b) < eps.

Randomisation functions
val draw_rows : ?replacement:bool -> ('a, 'b) t -> int -> ('a, 'b) t * int array

draw_rows x m draws m rows randomly from x. The row indices are also returned in an int array along with the selected rows. The parameter replacement indicates whether the drawing is by replacement or not.

val draw_cols : ?replacement:bool -> ('a, 'b) t -> int -> ('a, 'b) t * int array

draw_cols x m draws m cols randomly from x. The column indices are also returned in an int array along with the selected columns. The parameter replacement indicates whether the drawing is by replacement or not.

val draw_rows2 : ?replacement:bool -> ('a, 'b) t -> ('a, 'b) t -> int -> ('a, 'b) t * ('a, 'b) t * int array

draw_rows2 x y c is similar to draw_rows but applies to two matrices.

val draw_cols2 : ?replacement:bool -> ('a, 'b) t -> ('a, 'b) t -> int -> ('a, 'b) t * ('a, 'b) t * int array

draw_cols2 x y c is similar to draw_cols but applies to two matrices.

val shuffle_rows : ('a, 'b) t -> ('a, 'b) t

shuffle_rows x shuffles all the rows in matrix x.

val shuffle_cols : ('a, 'b) t -> ('a, 'b) t

shuffle_cols x shuffles all the columns in matrix x.

val shuffle : ('a, 'b) t -> ('a, 'b) t

shuffle x shuffles all the elements in x by first shuffling along the rows then shuffling along columns. It is equivalent to shuffle_cols (shuffle_rows x).

Input/Output functions
val to_array : ('a, 'b) t -> 'a array

to_array x flattens an m by n matrix x then returns x as a float array of length (numel x).

val of_array : ('a, 'b) Owl_dense_ndarray_generic.kind -> 'a array -> int -> int -> ('a, 'b) t

of_array x m n converts a float array x into an m by n matrix. Note the length of x must be equal to (m * n).

Similar to reshape function, you can pass in one negative index to let Owl automatically infer its dimension.
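A minimal sketch of of_array (assuming M = Owl.Dense.Matrix.Generic; the array and shape are illustrative):

module M = Owl.Dense.Matrix.Generic

let () =
  let x = M.of_array Bigarray.Float64 [| 1.; 2.; 3.; 4.; 5.; 6. |] 2 3 in
  assert (M.shape x = (2, 3));
  assert (M.get x 1 0 = 4.)        (* elements are laid out in row-major order *)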

val to_arrays : ('a, 'b) t -> 'a array array

to_arrays x returns an array of float arrays, wherein each row in x becomes an array in the result.

val of_arrays : ('a, 'b) Owl_dense_ndarray_generic.kind -> 'a array array -> ('a, 'b) t

of_arrays x converts an array of m float arrays (of length n) into an m by n matrix.

val to_rows : ('a, 'b) t -> ('a, 'b) t array
val of_rows : ('a, 'b) t array -> ('a, 'b) t
val to_cols : ('a, 'b) t -> ('a, 'b) t array
val of_cols : ('a, 'b) t array -> ('a, 'b) t
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:('a -> string) -> ('a, 'b) t -> unit

print x pretty prints matrix x without headings.

val save : out:string -> ('a, 'b) t -> unit

save x ~out saves the matrix x to a file with the name out. The format is binary by using Marshal module to serialise the matrix.

val load : ('a, 'b) Owl_dense_ndarray_generic.kind -> string -> ('a, 'b) t

load f loads a matrix from file f. The file must be previously saved by using save function.

val save_txt : ?sep:string -> ?append:bool -> out:string -> ('a, 'b) t -> unit

save_txt ~sep ~append ~out x saves the matrix x into a text file out delimited by the specified string sep (default: tab). If append is false (the default), an existing file will be truncated and overwritten. If append is true and the file exists, new rows will be appended to it. Files are created, if necessary, with the AND of 0o644 and the user's umask value. Note that the operation can be very time consuming.

val load_txt : ?sep:string -> ('a, 'b) Owl_dense_ndarray_generic.kind -> string -> ('a, 'b) t

load_txt ~sep k f loads a text file f into a matrix of type k. The delimiter is specified by sep, which can be a regular expression.
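A sketch of a save_txt / load_txt round trip (assuming M = Owl.Dense.Matrix.Generic; the file name "x.txt" is an arbitrary example):

module M = Owl.Dense.Matrix.Generic

let () =
  let x = M.sequential Bigarray.Float64 2 2 in
  M.save_txt ~out:"x.txt" x;                          (* tab-delimited by default *)
  let y = M.load_txt Bigarray.Float64 "x.txt" in
  assert (M.shape y = (2, 2))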

val save_npy : out:string -> ('a, 'b) t -> unit

save_npy ~out x saves the matrix x into a npy file out. This function is implemented using npy-ocaml https://github.com/LaurentMazare/npy-ocaml.

val load_npy : ('a, 'b) Owl_dense_ndarray_generic.kind -> string -> ('a, 'b) t

load_npy k file loads a npy file into a matrix of type k. If the matrix in the file is not of type k, it fails with [file]: incorrect format. This function is implemented using npy-ocaml https://github.com/LaurentMazare/npy-ocaml.

Unary math operators
val re_c2s : (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt) t -> (float, Stdlib.Bigarray.float32_elt) t

re_c2s x returns all the real components of x in a new ndarray of the same shape.

val re_z2d : (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt) t -> (float, Stdlib.Bigarray.float64_elt) t

re_z2d x returns all the real components of x in a new ndarray of the same shape.

val im_c2s : (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt) t -> (float, Stdlib.Bigarray.float32_elt) t

im_c2s x returns all the imaginary components of x in a new ndarray of the same shape.

val im_z2d : (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt) t -> (float, Stdlib.Bigarray.float64_elt) t

im_z2d x returns all the imaginary components of x in a new ndarray of the same shape.

val min : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

min x returns the minimum of all elements in x along specified axis. If no axis is specified, x will be flattened and the minimum of all the elements will be returned. For two complex numbers, the one with the smaller magnitude will be selected. If two magnitudes are the same, the one with the smaller phase will be selected.

val min' : ('a, 'b) t -> 'a

min' x is similar to min but returns the minimum of all elements in x as a scalar value.

val max : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

max x returns the maximum of all elements in x along specified axis. If no axis is specified, x will be flattened and the maximum of all the elements will be returned. For two complex numbers, the one with the greater magnitude will be selected. If two magnitudes are the same, the one with the greater phase will be selected.

val max' : ('a, 'b) t -> 'a

max' x is similar to max but returns the maximum of all elements in x as a scalar value.

val minmax : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t * ('a, 'b) t

minmax ~axis x returns a pair of matrices: the first contains the minimum values along the specified axis and the second contains the maximum values.

val minmax' : ('a, 'b) t -> 'a * 'a

minmax' x returns (min_v, max_v), min_v is the minimum value in x while max_v is the maximum.

val min_i : ('a, 'b) t -> 'a * int array

min_i x returns the minimum of all elements in x as well as its index.

val max_i : ('a, 'b) t -> 'a * int array

max_i x returns the maximum of all elements in x as well as its index.

val minmax_i : ('a, 'b) t -> ('a * int array) * ('a * int array)

minmax_i x returns ((min_v,min_i), (max_v,max_i)) where (min_v,min_i) is the minimum value in x along with its index while (max_v,max_i) is the maximum value along with its index.

val trace : ('a, 'b) t -> 'a

trace x returns the sum of diagonal elements in x.

val sum : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

sum ~axis x sums the elements in x along the specified axis.

val sum' : ('a, 'b) t -> 'a

sum' x returns the summation of all the elements in x.

val prod : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

prod ~axis x multiplies the elements in x along the specified axis.

val prod' : ('a, 'b) t -> 'a

prod' x returns the product of all the elements in x.

val mean : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

mean ~axis x calculates the mean along specified axis.

val mean' : ('a, 'b) t -> 'a

mean' x calculates the mean of all the elements in x.

val var : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

var ~axis x calculates the variance along specified axis.

val var' : ('a, 'b) t -> 'a

var' x calculates the variance of all the elements in x.

val std : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

std ~axis calculates the standard deviation along specified axis.

val std' : ('a, 'b) t -> 'a

std' x calculates the standard deviation of all the elements in x.

val sem : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

sem ~axis calculates the standard error of the mean along the specified axis.

val sem' : ('a, 'b) t -> 'a

sem' x calculates the standard error of the mean of all the elements in x.

val sum_rows : ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

sum_rows x returns the summation of all the row vectors in x.

val sum_cols : ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

sum_cols returns the summation of all the column vectors in x.

val mean_rows : ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

mean_rows x returns the mean value of all row vectors in x. It is equivalent to div_scalar (sum_rows x) (float_of_int (row_num x)).

val mean_cols : ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

mean_cols x returns the mean value of all column vectors in x. It is equivalent to div_scalar (sum_cols x) (float_of_int (col_num x)).

val min_rows : (float, 'b) t -> (float * int * int) array

min_rows x returns the minimum value in each row along with their coordinates.

val min_cols : (float, 'b) t -> (float * int * int) array

min_cols x returns the minimum value in each column along with their coordinates.

val max_rows : (float, 'b) t -> (float * int * int) array

max_rows x returns the maximum value in each row along with their coordinates.

val max_cols : (float, 'b) t -> (float * int * int) array

max_cols x returns the maximum value in each column along with their coordinates.

val abs : ('a, 'b) t -> ('a, 'b) t

abs x returns the absolute value of all elements in x in a new matrix.

val abs_c2s : (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt) t -> (float, Stdlib.Bigarray.float32_elt) t

abs_c2s x is similar to abs but takes complex32 as input.

val abs_z2d : (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt) t -> (float, Stdlib.Bigarray.float64_elt) t

abs_z2d x is similar to abs but takes complex64 as input.

val abs2 : ('a, 'b) t -> ('a, 'b) t

abs2 x returns the square of absolute value of all elements in x in a new ndarray.

val abs2_c2s : (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt) t -> (float, Stdlib.Bigarray.float32_elt) t

abs2_c2s x is similar to abs2 but takes complex32 as input.

val abs2_z2d : (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt) t -> (float, Stdlib.Bigarray.float64_elt) t

abs2_z2d x is similar to abs2 but takes complex64 as input.

val conj : ('a, 'b) t -> ('a, 'b) t

conj x computes the conjugate of the elements in x and returns the result in a new matrix. If the passed in x is a real matrix, the function simply returns a copy of the original x.

val neg : ('a, 'b) t -> ('a, 'b) t

neg x negates the elements in x and returns the result in a new matrix.

val reci : ('a, 'b) t -> ('a, 'b) t

reci x computes the reciprocal of every element in x and returns the result in a new ndarray.

val reci_tol : ?tol:'a -> ('a, 'b) t -> ('a, 'b) t

reci_tol ~tol x computes the reciprocal of every element in x. Different from reci, reci_tol sets the elements whose absolute value is smaller than tol to zero. If tol is not specified, the default Owl_utils.eps Float32 will be used. For complex numbers, refer to Owl's documentation to see how they are compared.

val signum : (float, 'a) t -> (float, 'a) t

signum computes the sign value (-1 for negative numbers, 0 (or -0) for zero, 1 for positive numbers, nan for nan).

val sqr : ('a, 'b) t -> ('a, 'b) t

sqr x computes the square of the elements in x and returns the result in a new matrix.

val sqrt : ('a, 'b) t -> ('a, 'b) t

sqrt x computes the square root of the elements in x and returns the result in a new matrix.

val cbrt : ('a, 'b) t -> ('a, 'b) t

cbrt x computes the cubic root of the elements in x and returns the result in a new matrix.

val exp : ('a, 'b) t -> ('a, 'b) t

exp x computes the exponential of the elements in x and returns the result in a new matrix.

val exp2 : ('a, 'b) t -> ('a, 'b) t

exp2 x computes the base-2 exponential of the elements in x and returns the result in a new matrix.

val exp10 : ('a, 'b) t -> ('a, 'b) t

exp10 x computes the base-10 exponential of the elements in x and returns the result in a new matrix.

val expm1 : ('a, 'b) t -> ('a, 'b) t

expm1 x computes exp x -. 1. of the elements in x and returns the result in a new matrix.

val log : ('a, 'b) t -> ('a, 'b) t

log x computes the logarithm of the elements in x and returns the result in a new matrix.

val log10 : ('a, 'b) t -> ('a, 'b) t

log10 x computes the base-10 logarithm of the elements in x and returns the result in a new matrix.

val log2 : ('a, 'b) t -> ('a, 'b) t

log2 x computes the base-2 logarithm of the elements in x and returns the result in a new matrix.

val log1p : ('a, 'b) t -> ('a, 'b) t

log1p x computes log (1 + x) of the elements in x and returns the result in a new matrix.

val sin : ('a, 'b) t -> ('a, 'b) t

sin x computes the sine of the elements in x and returns the result in a new matrix.

val cos : ('a, 'b) t -> ('a, 'b) t

cos x computes the cosine of the elements in x and returns the result in a new matrix.

val tan : ('a, 'b) t -> ('a, 'b) t

tan x computes the tangent of the elements in x and returns the result in a new matrix.

val asin : ('a, 'b) t -> ('a, 'b) t

asin x computes the arc sine of the elements in x and returns the result in a new matrix.

val acos : ('a, 'b) t -> ('a, 'b) t

acos x computes the arc cosine of the elements in x and returns the result in a new matrix.

val atan : ('a, 'b) t -> ('a, 'b) t

atan x computes the arc tangent of the elements in x and returns the result in a new matrix.

val sinh : ('a, 'b) t -> ('a, 'b) t

sinh x computes the hyperbolic sine of the elements in x and returns the result in a new matrix.

val cosh : ('a, 'b) t -> ('a, 'b) t

cosh x computes the hyperbolic cosine of the elements in x and returns the result in a new matrix.

val tanh : ('a, 'b) t -> ('a, 'b) t

tanh x computes the hyperbolic tangent of the elements in x and returns the result in a new matrix.

val asinh : ('a, 'b) t -> ('a, 'b) t

asinh x computes the hyperbolic arc sine of the elements in x and returns the result in a new matrix.

val acosh : ('a, 'b) t -> ('a, 'b) t

acosh x computes the hyperbolic arc cosine of the elements in x and returns the result in a new matrix.

val atanh : ('a, 'b) t -> ('a, 'b) t

atanh x computes the hyperbolic arc tangent of the elements in x and returns the result in a new matrix.

val floor : ('a, 'b) t -> ('a, 'b) t

floor x computes the floor of the elements in x and returns the result in a new matrix.

val ceil : ('a, 'b) t -> ('a, 'b) t

ceil x computes the ceiling of the elements in x and returns the result in a new matrix.

val round : ('a, 'b) t -> ('a, 'b) t

round x rounds the elements in x and returns the result in a new matrix.

val trunc : ('a, 'b) t -> ('a, 'b) t

trunc x computes the truncation of the elements in x and returns the result in a new matrix.

val fix : ('a, 'b) t -> ('a, 'b) t

fix x rounds each element of x to the nearest integer toward zero. For positive elements, the behavior is the same as floor. For negative ones, the behavior is the same as ceil.

val modf : ('a, 'b) t -> ('a, 'b) t * ('a, 'b) t

modf x performs modf over all the elements in x; the fractional part is saved in the first element of the returned tuple whereas the integer part is saved in the second element.

val erf : (float, 'a) t -> (float, 'a) t

erf x computes the error function of the elements in x and returns the result in a new matrix.

val erfc : (float, 'a) t -> (float, 'a) t

erfc x computes the complementary error function of the elements in x and returns the result in a new matrix.

val logistic : (float, 'a) t -> (float, 'a) t

logistic x computes the logistic function 1 / (1 + exp(-x)) of the elements in x and returns the result in a new matrix.

val relu : (float, 'a) t -> (float, 'a) t

relu x computes the rectified linear unit function max(x, 0) of the elements in x and returns the result in a new matrix.

val elu : ?alpha:float -> (float, 'a) t -> (float, 'a) t

refer to Owl_dense_ndarray_generic.elu

val leaky_relu : ?alpha:float -> (float, 'a) t -> (float, 'a) t

refer to Owl_dense_ndarray_generic.leaky_relu

val softplus : (float, 'a) t -> (float, 'a) t

softplus x computes the softplus function log(1 + exp(x)) of the elements in x and returns the result in a new matrix.

val softsign : (float, 'a) t -> (float, 'a) t

softsign x computes the softsign function x / (1 + abs(x)) of the elements in x and returns the result in a new matrix.

val softmax : ?axis:int -> (float, 'a) t -> (float, 'a) t

softmax x computes the softmax function (exp x) / (sum (exp x)) of all the elements along the specified axis in x and returns the result in a new ndarray.

val sigmoid : (float, 'a) t -> (float, 'a) t

sigmoid x computes the sigmoid function 1 / (1 + exp (-x)) for each element in x.

val log_sum_exp' : (float, 'a) t -> float

log_sum_exp x computes the logarithm of the sum of exponentials of all the elements in x.

val log_sum_exp : ?axis:int -> ?keep_dims:bool -> (float, 'a) t -> (float, 'a) t

log_sum_exp ~axis x computes the logarithm of the sum of exponentials of all the elements in x along the specified axis.

val l1norm : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

l1norm ~axis x calculates the l1-norm of x along the specified axis.

val l1norm' : ('a, 'b) t -> 'a

l1norm' x calculates the l1-norm of all the elements in x.

val l2norm : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

l2norm ~axis x calculates the l2-norm of x along the specified axis.

val l2norm' : ('a, 'b) t -> 'a

l2norm' x calculates the l2-norm of all the elements in x.

val l2norm_sqr : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

l2norm_sqr ~axis x calculates the squared l2-norm of x along the specified axis.

val l2norm_sqr' : ('a, 'b) t -> 'a

l2norm_sqr' x calculates the square of the l2-norm (Euclidean norm) of all elements in x. The function uses conjugate transpose in the product, hence it always returns a float number.

val vecnorm : ?axis:int -> ?p:float -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_ndarray_generic`.

val vecnorm' : ?p:float -> ('a, 'b) t -> 'a

Refer to :doc:`owl_dense_ndarray_generic`.

val max_pool : ?padding:Owl_types.padding -> (float, 'a) t -> int array -> int array -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`.

val avg_pool : ?padding:Owl_types.padding -> (float, 'a) t -> int array -> int array -> (float, 'a) t

Refer to :doc:`owl_dense_ndarray_generic`.

val cumsum : ?axis:int -> ('a, 'b) t -> ('a, 'b) t

cumsum ~axis x, refer to the documentation in Owl_dense_ndarray_generic.

val cumprod : ?axis:int -> ('a, 'b) t -> ('a, 'b) t

cumprod ~axis x, refer to the documentation in Owl_dense_ndarray_generic.

val cummin : ?axis:int -> ('a, 'b) t -> ('a, 'b) t

cummin ~axis x : performs cumulative min along axis dimension.

val cummax : ?axis:int -> ('a, 'b) t -> ('a, 'b) t

cummax ~axis x : performs cumulative max along axis dimension.

val diff : ?axis:int -> ?n:int -> ('a, 'b) t -> ('a, 'b) t

diff ~axis ~n x calculates the n-th difference of x along the specified axis.

Parameters: * axis: axis to calculate the difference. The default value is the highest dimension. * n: how many times to calculate the difference. The default value is 1.

Return: * The difference ndarray y. Note that the shape of y is 1 less than that of x along the specified axis.

val angle : (Stdlib.Complex.t, 'a) t -> (Stdlib.Complex.t, 'a) t

angle x calculates the phase angle of all complex numbers in x.

val proj : (Stdlib.Complex.t, 'a) t -> (Stdlib.Complex.t, 'a) t

proj x computes the projection on the Riemann sphere of all elements in x.

val mat2gray : ?amin:'a -> ?amax:'a -> ('a, 'b) t -> ('a, 'b) t

mat2gray ~amin ~amax x converts the matrix x to an intensity image. The elements in x are clipped by amin and amax, and they will be between 0. and 1. after conversion to represent the intensity.

val lgamma : ('a, 'b) t -> ('a, 'b) t

lgamma x computes the loggamma of the elements in x and returns the result in a new matrix.

val dawsn : ('a, 'b) t -> ('a, 'b) t

dawsn x computes the Dawson function of the elements in x and returns the result in a new matrix.

val i0 : ('a, 'b) t -> ('a, 'b) t

i0 x computes the modified Bessel function of order 0 of the elements in x and returns the result in a new ndarray.

val i0e : ('a, 'b) t -> ('a, 'b) t

i0e x computes the exponentially scaled modified Bessel function of order 0 of the elements in x and returns the result in a new ndarray.

val i1 : ('a, 'b) t -> ('a, 'b) t

i1 x computes the modified Bessel function of order 1 of the elements in x and returns the result in a new ndarray.

val i1e : ('a, 'b) t -> ('a, 'b) t

i1e x computes the exponentially scaled modified Bessel function of order 1 of the elements in x and returns the result in a new ndarray.

val iv : v:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

iv ~v x computes the modified Bessel function of x of real order v.

val scalar_iv : v:'a -> ('a, 'b) t -> ('a, 'b) t

scalar_iv v x computes the modified Bessel function of x of real order v.

val iv_scalar : v:('a, 'b) t -> 'a -> ('a, 'b) t

iv_scalar ~v x computes the modified Bessel function of x of real order v.

val j0 : ('a, 'b) t -> ('a, 'b) t

j0 x computes the Bessel function of order 0 of the elements in x and returns the result in a new ndarray.

val j1 : ('a, 'b) t -> ('a, 'b) t

j1 x computes the Bessel function of order 1 of the elements in x and returns the result in a new ndarray.

val jv : v:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

jv ~v x computes the Bessel function of the first kind of x of real order v.

val scalar_jv : v:'a -> ('a, 'b) t -> ('a, 'b) t

scalar_jv v x computes the Bessel function of the first kind of x of real order v.

val jv_scalar : v:('a, 'b) t -> 'a -> ('a, 'b) t

jv_scalar ~v x computes the Bessel function of the first kind of x of real order v.

Binary math operators
val add : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

add x y adds all the elements in x and y elementwise, and returns the result in a new matrix.

val sub : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

sub x y subtracts all the elements in x and y elementwise, and returns the result in a new matrix.

val mul : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

mul x y multiplies all the elements in x and y elementwise, and returns the result in a new matrix.

val div : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

div x y divides all the elements in x and y elementwise, and returns the result in a new matrix.

val add_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

add_scalar x a adds a scalar value a to each element in x, and returns the result in a new matrix.

val sub_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

sub_scalar x a subtracts a scalar value a from each element in x, and returns the result in a new matrix.

val mul_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

mul_scalar x a multiplies each element in x by a scalar value a, and returns the result in a new matrix.

val div_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

div_scalar x a divides each element in x by a scalar value a, and returns the result in a new matrix.

val scalar_add : 'a -> ('a, 'b) t -> ('a, 'b) t

scalar_add a x adds a scalar value a to each element in x, and returns the result in a new matrix.

val scalar_sub : 'a -> ('a, 'b) t -> ('a, 'b) t

scalar_sub a x subtracts each element in x from a scalar value a, and returns the result in a new matrix.

val scalar_mul : 'a -> ('a, 'b) t -> ('a, 'b) t

scalar_mul a x multiplies each element in x by a scalar value a, and returns the result in a new matrix.

val scalar_div : 'a -> ('a, 'b) t -> ('a, 'b) t

scalar_div a x divides a scalar value a by each element in x, and returns the result in a new matrix.

val dot : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

dot x y returns the matrix product of matrices x and y.
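For instance, a minimal sketch of dot (assuming M = Owl.Dense.Matrix.Generic; the shapes are illustrative):

module M = Owl.Dense.Matrix.Generic

let () =
  let a = M.ones Bigarray.Float64 2 3 in
  let b = M.ones Bigarray.Float64 3 4 in
  let c = M.dot a b in                  (* (2 x 3) * (3 x 4) = 2 x 4 *)
  assert (M.shape c = (2, 4));
  assert (M.get c 0 0 = 3.)             (* each entry is the sum of three ones *)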

val add_diag : ('a, 'b) t -> 'a -> ('a, 'b) t

add_diag x a adds a to the diagonal elements in x. A new copy of the data is returned.

val pow : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

pow x y computes pow(a, b) of all the elements in x and y elementwise, and returns the result in a new matrix.

val scalar_pow : 'a -> ('a, 'b) t -> ('a, 'b) t

scalar_pow a x raises the scalar a to the power of each element in x, element-wise.

val pow_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

pow_scalar x a raises each element in x to the power a.

val atan2 : (float, 'a) t -> (float, 'a) t -> (float, 'a) t

atan2 x y computes atan2(a, b) of all the elements in x and y elementwise, and returns the result in a new matrix.

val scalar_atan2 : float -> (float, 'a) t -> (float, 'a) t

scalar_atan2 a x computes atan2 a b for each element b in x.

val atan2_scalar : (float, 'a) t -> float -> (float, 'a) t

atan2_scalar x a computes atan2 b a for each element b in x.

val hypot : (float, 'a) t -> (float, 'a) t -> (float, 'a) t

hypot x y computes sqrt(x*x + y*y) of all the elements in x and y elementwise, and returns the result in a new matrix.

val min2 : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

min2 x y computes the minimum of all the elements in x and y elementwise, and returns the result in a new matrix.

val max2 : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

max2 x y computes the maximum of all the elements in x and y elementwise, and returns the result in a new matrix.

val fmod : (float, 'a) t -> (float, 'a) t -> (float, 'a) t

fmod x y performs float modulus division.

val fmod_scalar : (float, 'a) t -> float -> (float, 'a) t

fmod_scalar x a performs mod division between x and scalar a.

val scalar_fmod : float -> (float, 'a) t -> (float, 'a) t

scalar_fmod x a performs mod division between scalar a and x.

val ssqr' : ('a, 'b) t -> 'a -> 'a

ssqr' x a computes the sum of squared differences of all the elements in x from the constant a. This function only computes the square of each element rather than the conjugate transpose as sqr_nrm2 does.

val ssqr_diff' : ('a, 'b) t -> ('a, 'b) t -> 'a

ssqr_diff' x y computes the sum of squared differences between every element in x and its corresponding element in y.

val cross_entropy' : (float, 'a) t -> (float, 'a) t -> float

cross_entropy' x y calculates the cross entropy between x and y using base e.

val clip_by_value : ?amin:'a -> ?amax:'a -> ('a, 'b) t -> ('a, 'b) t

clip_by_value ~amin ~amax x clips the elements in x based on amin and amax. The elements smaller than amin will be set to amin, and the elements greater than amax will be set to amax.

val clip_by_l2norm : float -> (float, 'a) t -> (float, 'a) t

clip_by_l2norm t x clips the x according to the threshold set by t.

val cov : ?b:('a, 'b) t -> a:('a, 'b) t -> ('a, 'b) t

cov ~a calculates the covariance matrix of a wherein each row represents one observation and each column represents one random variable. a is normalised by the number of observations-1. If there is only one observation, it is normalised by 1.

cov ~a ~b takes two matrices as inputs. The function flattens a and b first, then returns a 2 x 2 matrix, so the two must have the same number of elements.

val kron : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

kron a b calculates the Kronecker product between the matrices a and b. If a is an m x n matrix and b is a p x q matrix, then kron(a,b) is an m*p x n*q matrix formed by taking all possible products between the elements of a and the matrix b.
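As a sketch of the resulting shape (assuming M = Owl.Dense.Matrix.Generic; the sizes are illustrative):

module M = Owl.Dense.Matrix.Generic

let () =
  let a = M.ones Bigarray.Float64 2 2 in      (* m x n = 2 x 2 *)
  let b = M.ones Bigarray.Float64 3 3 in      (* p x q = 3 x 3 *)
  let c = M.kron a b in
  assert (M.shape c = (6, 6))                 (* (m*p) x (n*q) *)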

val fma : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

fma x y z calculates the `fused multiply add`, i.e. (x * y) + z.
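
A short sketch, assuming the Owl.Mat alias:

let x = Owl.Mat.uniform 2 2 in
let y = Owl.Mat.uniform 2 2 in
let z = Owl.Mat.ones 2 2 in
Owl.Mat.fma x y z   (* elementwise (x * y) + z *)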

Cast functions
val cast : ('a, 'b) Owl_dense_ndarray_generic.kind -> ('c, 'd) t -> ('a, 'b) t

cast kind x casts x of type ('c, 'd) t to the type ('a, 'b) t specified by the kind parameter. This function is a generalisation of the other type-casting functions such as cast_s2d and cast_c2z.
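
A sketch of promoting single precision to double precision, assuming the usual Owl.Dense.Matrix.S and Owl.Dense.Matrix.Generic module paths:

let xs = Owl.Dense.Matrix.S.uniform 2 3 in
let xd = Owl.Dense.Matrix.Generic.cast Bigarray.float64 xs in
(* equivalently: Owl.Dense.Matrix.Generic.cast_s2d xs *)
ignore xd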

val cast_s2d : (float, Stdlib.Bigarray.float32_elt) t -> (float, Stdlib.Bigarray.float64_elt) t

cast_s2d x casts x from float32 to float64.

val cast_d2s : (float, Stdlib.Bigarray.float64_elt) t -> (float, Stdlib.Bigarray.float32_elt) t

cast_d2s x casts x from float64 to float32.

val cast_c2z : (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt) t -> (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt) t

cast_c2z x casts x from complex32 to complex64.

val cast_z2c : (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt) t -> (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt) t

cast_z2c x casts x from complex64 to complex32.

val cast_s2c : (float, Stdlib.Bigarray.float32_elt) t -> (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt) t

cast_s2c x casts x from float32 to complex32.

val cast_d2z : (float, Stdlib.Bigarray.float64_elt) t -> (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt) t

cast_d2z x casts x from float64 to complex64.

val cast_s2z : (float, Stdlib.Bigarray.float32_elt) t -> (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt) t

cast_s2z x casts x from float32 to complex64.

val cast_d2c : (float, Stdlib.Bigarray.float64_elt) t -> (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt) t

cast_d2c x casts x from float64 to complex32.

In-place modification
val create_ : out:('a, 'b) t -> 'a -> unit

TODO

val uniform_ : ?a:'a -> ?b:'a -> out:('a, 'b) t -> unit

TODO

val bernoulli_ : ?p:float -> out:('a, 'b) t -> unit

TODO

val zeros_ : out:('a, 'b) t -> unit

TODO

val ones_ : out:('a, 'b) t -> unit

TODO

val one_hot_ : out:('a, 'b) t -> int -> ('a, 'b) t -> unit

TODO

val sort_ : ('a, 'b) t -> unit

sort_ x performs an in-place quicksort of the elements in x.
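
For example, assuming the Owl.Mat alias:

let x = Owl.Mat.of_array [| 3.; 1.; 2. |] 1 3 in
Owl.Mat.sort_ x   (* x now holds 1., 2., 3.; nothing is returned *)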

val copy_ : out:('a, 'b) t -> ('a, 'b) t -> unit

copy_ ~out src copies the data from ndarray src to destination out.

val reshape_ : out:('a, 'b) t -> ('a, 'b) t -> unit

TODO

val transpose_ : out:('a, 'b) t -> ?axis:int array -> ('a, 'b) t -> unit

transpose_ ~out x is similar to transpose x but the output is written to out.

val sum_ : out:('a, 'b) t -> axis:int -> ('a, 'b) t -> unit

TODO

val min_ : out:('a, 'b) t -> axis:int -> ('a, 'b) t -> unit

TODO

val max_ : out:('a, 'b) t -> axis:int -> ('a, 'b) t -> unit

TODO

val add_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

add_ x y is similar to add function but the output is written to out. You need to make sure out is big enough to hold the output result.
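
A small sketch with an explicit output buffer, assuming the Owl.Mat alias:

let x = Owl.Mat.uniform 2 2 in
let y = Owl.Mat.uniform 2 2 in
let out = Owl.Mat.zeros 2 2 in
Owl.Mat.add_ ~out x y   (* out now holds the elementwise sum of x and y *)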

val sub_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

sub_ x y is similar to sub function but the output is written to out. You need to make sure out is big enough to hold the output result.

val mul_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

mul_ x y is similar to mul function but the output is written to out. You need to make sure out is big enough to hold the output result.

val div_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

div_ x y is similar to div function but the output is written to out. You need to make sure out is big enough to hold the output result.

val pow_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

pow_ x y is similar to pow function but the output is written to out. You need to make sure out is big enough to hold the output result.

val atan2_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

atan2_ x y is similar to atan2 function but the output is written to out. You need to make sure out is big enough to hold the output result.

val hypot_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

hypot_ x y is similar to hypot function but the output is written to out. You need to make sure out is big enough to hold the output result.

val fmod_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

fmod_ x y is similar to fmod function but the output is written to out. You need to make sure out is big enough to hold the output result.

val min2_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

min2_ x y is similar to min2 function but the output is written to out. You need to make sure out is big enough to hold the output result.

val max2_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

max2_ x y is similar to max2 function but the output is written to out. You need to make sure out is big enough to hold the output result.

val add_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit

add_scalar_ x y is similar to add_scalar function but the output is written to x.

val sub_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit

sub_scalar_ x y is similar to sub_scalar function but the output is written to x.

val mul_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit

mul_scalar_ x y is similar to mul_scalar function but the output is written to x.

val div_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit

div_scalar_ x y is similar to div_scalar function but the output is written to x.

val pow_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit

pow_scalar_ x y is similar to pow_scalar function but the output is written to x.

val atan2_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit

atan2_scalar_ x y is similar to atan2_scalar function but the output is written to x.

val fmod_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit

fmod_scalar_ x y is similar to fmod_scalar function but the output is written to x.

val scalar_add_ : ?out:('a, 'b) t -> 'a -> ('a, 'b) t -> unit

scalar_add_ a x is similar to scalar_add function but the output is written to x.

val scalar_sub_ : ?out:('a, 'b) t -> 'a -> ('a, 'b) t -> unit

scalar_sub_ a x is similar to scalar_sub function but the output is written to x.

val scalar_mul_ : ?out:('a, 'b) t -> 'a -> ('a, 'b) t -> unit

scalar_mul_ a x is similar to scalar_mul function but the output is written to x.

val scalar_div_ : ?out:('a, 'b) t -> 'a -> ('a, 'b) t -> unit

scalar_div_ a x is similar to scalar_div function but the output is written to x.

val scalar_pow_ : ?out:('a, 'b) t -> 'a -> ('a, 'b) t -> unit

scalar_pow_ a x is similar to scalar_pow function but the output is written to x.

val scalar_atan2_ : ?out:('a, 'b) t -> 'a -> ('a, 'b) t -> unit

scalar_atan2_ a x is similar to scalar_atan2 function but the output is written to x.

val scalar_fmod_ : ?out:('a, 'b) t -> 'a -> ('a, 'b) t -> unit

scalar_fmod_ a x is similar to scalar_fmod function but the output is written to x.

val fma_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

fma_ ~out x y z is similar to fma x y z function but the output is written to out.

val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:'a -> ?beta:'a -> c:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

Refer to Owl_dense_matrix_generic.

val conj_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

conj_ x is similar to conj but output is written to x

val abs_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

abs_ x is similar to abs but output is written to x

val neg_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

neg_ x is similar to neg but output is written to x

val reci_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

reci_ x is similar to reci but output is written to x

val signum_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

signum_ x is similar to signum but output is written to x

val sqr_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

sqr_ x is similar to sqr but output is written to x

val sqrt_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

sqrt_ x is similar to sqrt but output is written to x

val cbrt_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

cbrt_ x is similar to cbrt but output is written to x

val exp_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

exp_ x is similar to exp but output is written to x

val exp2_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

exp2_ x is similar to exp2 but output is written to x

val exp10_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

exp10_ x is similar to exp10 but output is written to x

val expm1_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

expm1_ x is similar to expm1 but output is written to x

val log_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

log_ x is similar to log but output is written to x

val log2_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

log2_ x is similar to log2 but output is written to x

val log10_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

log10_ x is similar to log10 but output is written to x

val log1p_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

log1p_ x is similar to log1p but output is written to x

val sin_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

sin_ x is similar to sin but output is written to x

val cos_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

cos_ x is similar to cos but output is written to x

val tan_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

tan_ x is similar to tan but output is written to x

val asin_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

asin_ x is similar to asin but output is written to x

val acos_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

acos_ x is similar to acos but output is written to x

val atan_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

atan_ x is similar to atan but output is written to x

val sinh_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

sinh_ x is similar to sinh but output is written to x

val cosh_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

cosh_ x is similar to cosh but output is written to x

val tanh_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

tanh_ x is similar to tanh but output is written to x

val asinh_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

asinh_ x is similar to asinh but output is written to x

val acosh_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

acosh_ x is similar to acosh but output is written to x

val atanh_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

atanh_ x is similar to atanh but output is written to x

val floor_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

floor_ x is similar to floor but output is written to x

val ceil_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

ceil_ x is similar to ceil but output is written to x

val round_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

round_ x is similar to round but output is written to x

val trunc_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

trunc_ x is similar to trunc but output is written to x

val fix_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

fix_ x is similar to fix but output is written to x

val erf_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

erf_ x is similar to erf but output is written to x

val erfc_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

erfc_ x is similar to erfc but output is written to x

val relu_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

relu_ x is similar to relu but output is written to x

val softplus_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

softplus_ x is similar to softplus but output is written to x

val softsign_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

softsign_ x is similar to softsign but output is written to x

val sigmoid_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

sigmoid_ x is similar to sigmoid but output is written to x

val softmax_ : ?out:('a, 'b) t -> ?axis:int -> ('a, 'b) t -> unit

softmax_ x is similar to softmax but output is written to x

val cumsum_ : ?out:('a, 'b) t -> ?axis:int -> ('a, 'b) t -> unit

cumsum_ x is similar to cumsum but output is written to x

val cumprod_ : ?out:('a, 'b) t -> ?axis:int -> ('a, 'b) t -> unit

cumprod_ x is similar to cumprod but output is written to x

val cummin_ : ?out:('a, 'b) t -> ?axis:int -> ('a, 'b) t -> unit

cummin_ x is similar to cummin but output is written to x

val cummax_ : ?out:('a, 'b) t -> ?axis:int -> ('a, 'b) t -> unit

cummax_ x is similar to cummax but output is written to x

val dropout_ : ?out:('a, 'b) t -> ?rate:float -> ('a, 'b) t -> unit

dropout_ x is similar to dropout but output is written to x

val elt_equal_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

elt_equal_ x y is similar to elt_equal function but the output is written to out. You need to make sure out is big enough to hold the output result.

val elt_not_equal_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

elt_not_equal_ x y is similar to elt_not_equal function but the output is written to out. You need to make sure out is big enough to hold the output result.

val elt_less_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

elt_less_ x y is similar to elt_less function but the output is written to out. You need to make sure out is big enough to hold the output result.

val elt_greater_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

elt_greater_ x y is similar to elt_greater function but the output is written to out. You need to make sure out is big enough to hold the output result.

val elt_less_equal_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

elt_less_equal_ x y is similar to elt_less_equal function but the output is written to out. You need to make sure out is big enough to hold the output result.

val elt_greater_equal_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

elt_greater_equal_ x y is similar to elt_greater_equal function but the output is written to out. You need to make sure out is big enough to hold the output result.

val elt_equal_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit

elt_equal_scalar_ x a is similar to elt_equal_scalar function but the output is written to x.

val elt_not_equal_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit

elt_not_equal_scalar_ x a is similar to elt_not_equal_scalar function but the output is written to x.

val elt_less_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit

elt_less_scalar_ x a is similar to elt_less_scalar function but the output is written to x.

val elt_greater_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit

elt_greater_scalar_ x a is similar to elt_greater_scalar function but the output is written to x.

val elt_less_equal_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit

elt_less_equal_scalar_ x a is similar to elt_less_equal_scalar function but the output is written to x.

val elt_greater_equal_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit

elt_greater_equal_scalar_ x a is similar to elt_greater_equal_scalar function but the output is written to x.

include module type of struct include Operator end
include sig ... end
val (+$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (-$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (*$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (/$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val ($+) : - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val ($-) : - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val ($*) : - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val ($/) : - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (=) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - bool
val (!=) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - bool
val (<>) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - bool
val (>) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - bool
val (<) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - bool
val (>=) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - bool
val (<=) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - bool
include sig ... end
val (=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (!=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (<>$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (<$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (>$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (<=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (>=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (=.) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (!=.) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (<>.) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (<.) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (>.) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (<=.) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (>=.) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (=.$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (!=.$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (<>.$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (<.$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (>.$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (<=.$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (>=.$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (=~) : - ?eps:float -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - bool
val (=~$) : ?eps:float -> ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (=~.) : - ?eps:float -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (=~.$) : - ?eps:float -> - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (%) : - (float, 'a) Owl_dense_matrix_generic.t -> - (float, 'a) Owl_dense_matrix_generic.t -> - (float, 'a) Owl_dense_matrix_generic.t
val (%$) : - (float, 'a) Owl_dense_matrix_generic.t -> - float -> - (float, 'a) Owl_dense_matrix_generic.t
val (**) : - (float, 'a) Owl_dense_matrix_generic.t -> - (float, 'a) Owl_dense_matrix_generic.t -> - (float, 'a) Owl_dense_matrix_generic.t
val ($**) : - float -> - (float, 'a) Owl_dense_matrix_generic.t -> - (float, 'a) Owl_dense_matrix_generic.t
val (**$) : - (float, 'a) Owl_dense_matrix_generic.t -> - float -> - (float, 'a) Owl_dense_matrix_generic.t
val (+=) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - unit
val (-=) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - unit
val (*=) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - unit
val (/=) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - unit
val (+$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (-$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (*$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (/$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (@=) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (@||) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (.!{;..}) : - ('a, 'b) Owl_dense_matrix_generic.t -> - Owl_types.index array -> - ('a, 'b) Owl_dense_matrix_generic.t
val (.!{;..}<-) : - ('a, 'b) Owl_dense_matrix_generic.t -> - Owl_types.index array -> - ('a, 'b) Owl_dense_matrix_generic.t -> - unit
val (.${}) : - ('a, 'b) Owl_dense_matrix_generic.t -> - int list -> - ('a, 'b) Owl_dense_matrix_generic.t
val (.${;..}) : - ('a, 'b) Owl_dense_matrix_generic.t -> - int list array -> - ('a, 'b) Owl_dense_matrix_generic.t
val (.${}<-) : - ('a, 'b) Owl_dense_matrix_generic.t -> - int list -> - ('a, 'b) Owl_dense_matrix_generic.t -> - unit
val (.${;..}<-) : - ('a, 'b) Owl_dense_matrix_generic.t -> - int list array -> - ('a, 'b) Owl_dense_matrix_generic.t -> - unit
include sig ... end
val (*@) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (.%{}) : ('a, 'b) Owl_dense_matrix_generic.t -> (int * int) -> 'a
val (.%{;..}) : ('a, 'b) Owl_dense_matrix_generic.t -> int array -> 'a
val (.%{}<-) : ('a, 'b) Owl_dense_matrix_generic.t -> (int * int) -> 'a -> unit
val (.%{;..}<-) : - ('a, 'b) Owl_dense_matrix_generic.t -> - int array -> - 'a -> - unit
include sig ... end
val (**@) : - ('a, 'b) Owl_linalg_generic.t -> - float -> - ('a, 'b) Owl_linalg_generic.t
val (/@) : - ('a, 'b) Owl_linalg_generic.t -> - ('a, 'b) Owl_linalg_generic.t -> - ('a, 'b) Owl_linalg_generic.t
val inv : ('a, 'b) Owl_linalg_generic.t -> ('a, 'b) Owl_linalg_generic.t
val mpow : - ('a, 'b) Owl_linalg_generic.t -> - float -> - ('a, 'b) Owl_linalg_generic.t
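
For reference, a minimal sketch of how a few of these operators read in practice, assuming the Owl.Mat alias brings them into scope:

let open Owl.Mat in
let x = uniform 3 3 and y = uniform 3 3 in
let z = (x *@ y) +$ 1. in   (* matrix product, then add a scalar to every element *)
z.%{0, 0}                   (* read the element at row 0, column 0 *)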
\ No newline at end of file diff --git a/owl/Owl_dense_matrix/Operator/index.html b/owl/Owl_dense_matrix/Operator/index.html deleted file mode 100644 index b4256131c..000000000 --- a/owl/Owl_dense_matrix/Operator/index.html +++ /dev/null @@ -1,180 +0,0 @@ - -Operator (owl.Owl_dense_matrix.Operator)

Module Owl_dense_matrix.Operator

include sig ... end
val (+$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (-$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (*$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (/$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val ($+) : - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val ($-) : - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val ($*) : - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val ($/) : - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (=) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - bool
val (!=) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - bool
val (<>) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - bool
val (>) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - bool
val (<) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - bool
val (>=) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - bool
val (<=) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - bool
include sig ... end
val (=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (!=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (<>$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (<$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (>$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (<=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (>=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (=.) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (!=.) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (<>.) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (<.) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (>.) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (<=.) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (>=.) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (=.$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (!=.$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (<>.$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (<.$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (>.$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (<=.$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (>=.$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (=~) : - ?eps:float -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - bool
val (=~$) : ?eps:float -> ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (=~.) : - ?eps:float -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (=~.$) : - ?eps:float -> - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (%) : - (float, 'a) Owl_dense_matrix_generic.t -> - (float, 'a) Owl_dense_matrix_generic.t -> - (float, 'a) Owl_dense_matrix_generic.t
val (%$) : - (float, 'a) Owl_dense_matrix_generic.t -> - float -> - (float, 'a) Owl_dense_matrix_generic.t
val (**) : - (float, 'a) Owl_dense_matrix_generic.t -> - (float, 'a) Owl_dense_matrix_generic.t -> - (float, 'a) Owl_dense_matrix_generic.t
val ($**) : - float -> - (float, 'a) Owl_dense_matrix_generic.t -> - (float, 'a) Owl_dense_matrix_generic.t
val (**$) : - (float, 'a) Owl_dense_matrix_generic.t -> - float -> - (float, 'a) Owl_dense_matrix_generic.t
val (+=) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - unit
val (-=) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - unit
val (*=) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - unit
val (/=) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - unit
val (+$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (-$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (*$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (/$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (@=) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (@||) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (.!{;..}) : - ('a, 'b) Owl_dense_matrix_generic.t -> - Owl_types.index array -> - ('a, 'b) Owl_dense_matrix_generic.t
val (.!{;..}<-) : - ('a, 'b) Owl_dense_matrix_generic.t -> - Owl_types.index array -> - ('a, 'b) Owl_dense_matrix_generic.t -> - unit
val (.${}) : - ('a, 'b) Owl_dense_matrix_generic.t -> - int list -> - ('a, 'b) Owl_dense_matrix_generic.t
val (.${;..}) : - ('a, 'b) Owl_dense_matrix_generic.t -> - int list array -> - ('a, 'b) Owl_dense_matrix_generic.t
val (.${}<-) : - ('a, 'b) Owl_dense_matrix_generic.t -> - int list -> - ('a, 'b) Owl_dense_matrix_generic.t -> - unit
val (.${;..}<-) : - ('a, 'b) Owl_dense_matrix_generic.t -> - int list array -> - ('a, 'b) Owl_dense_matrix_generic.t -> - unit
include sig ... end
val (*@) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (.%{}) : ('a, 'b) Owl_dense_matrix_generic.t -> (int * int) -> 'a
val (.%{;..}) : ('a, 'b) Owl_dense_matrix_generic.t -> int array -> 'a
val (.%{}<-) : ('a, 'b) Owl_dense_matrix_generic.t -> (int * int) -> 'a -> unit
val (.%{;..}<-) : - ('a, 'b) Owl_dense_matrix_generic.t -> - int array -> - 'a -> - unit
include sig ... end
val (**@) : - ('a, 'b) Owl_linalg_generic.t -> - float -> - ('a, 'b) Owl_linalg_generic.t
val (/@) : - ('a, 'b) Owl_linalg_generic.t -> - ('a, 'b) Owl_linalg_generic.t -> - ('a, 'b) Owl_linalg_generic.t
\ No newline at end of file diff --git a/owl/Owl_dense_matrix/S/index.html b/owl/Owl_dense_matrix/S/index.html deleted file mode 100644 index 71991630c..000000000 --- a/owl/Owl_dense_matrix/S/index.html +++ /dev/null @@ -1,225 +0,0 @@ - -S (owl.Owl_dense_matrix.S)

Module Owl_dense_matrix.S

include module type of struct include Owl_dense_matrix_s end
type elt = float
type mat = (float, Stdlib.Bigarray.float32_elt) Owl_dense_matrix_generic.t
include Owl_dense_matrix_intf.Common with type elt := elt and type mat := mat
Create dense matrices
val empty : int -> int -> mat
val create : int -> int -> elt -> mat
val init : int -> int -> (int -> elt) -> mat
val init_2d : int -> int -> (int -> int -> elt) -> mat
val zeros : int -> int -> mat
val ones : int -> int -> mat
val eye : int -> mat
val sequential : ?a:elt -> ?step:elt -> int -> int -> mat
val uniform : ?a:elt -> ?b:elt -> int -> int -> mat
val gaussian : ?mu:elt -> ?sigma:elt -> int -> int -> mat
val bernoulli : ?p:float -> int -> int -> mat
val unit_basis : int -> int -> mat
val diagm : ?k:int -> mat -> mat
val triu : ?k:int -> mat -> mat
val tril : ?k:int -> mat -> mat
val symmetric : ?upper:bool -> mat -> mat
val bidiagonal : ?upper:bool -> mat -> mat -> mat
val toeplitz : ?c:mat -> mat -> mat
val hankel : ?r:mat -> mat -> mat
val hadamard : int -> mat
val magic : int -> mat
Dense row vectors and meshgrids
val vector : int -> mat
val vector_zeros : int -> mat
val vector_ones : int -> mat
val vector_uniform : int -> mat
val linspace : elt -> elt -> int -> mat
val logspace : ?base:float -> elt -> elt -> int -> mat
val meshgrid : elt -> elt -> elt -> elt -> int -> int -> mat * mat
val meshup : mat -> mat -> mat * mat
Obtain the basic properties of a matrix
val shape : mat -> int * int
val row_num : mat -> int
val col_num : mat -> int
val numel : mat -> int
val nnz : mat -> int
val density : mat -> float
val size_in_bytes : mat -> int
val same_shape : mat -> mat -> bool
val same_data : mat -> mat -> bool
Manipulate a matrix
val get : mat -> int -> int -> elt
val set : mat -> int -> int -> elt -> unit
val get_index : mat -> int array array -> elt array
val set_index : mat -> int array array -> elt array -> unit
val get_fancy : Owl_types.index list -> mat -> mat
val set_fancy : Owl_types.index list -> mat -> mat -> unit
val get_slice : int list list -> mat -> mat
val set_slice : int list list -> mat -> mat -> unit
val row : mat -> int -> mat
val col : mat -> int -> mat
val rows : mat -> int array -> mat
val cols : mat -> int array -> mat
val resize : ?head:bool -> mat -> int array -> mat
val reshape : mat -> int array -> mat
val flatten : mat -> mat
val reverse : mat -> mat
val flip : ?axis:int -> mat -> mat
val rotate : mat -> int -> mat
val reset : mat -> unit
val fill : mat -> elt -> unit
val copy : mat -> mat
val copy_row_to : mat -> mat -> int -> unit
val copy_col_to : mat -> mat -> int -> unit
val concat_vertical : mat -> mat -> mat
val concat_horizontal : mat -> mat -> mat
val concat_vh : mat array array -> mat
val concatenate : ?axis:int -> mat array -> mat
val split : ?axis:int -> int array -> mat -> mat array
val split_vh : (int * int) array array -> mat -> mat array array
val transpose : mat -> mat
val ctranspose : mat -> mat
val swap_rows : mat -> int -> int -> unit
val swap_cols : mat -> int -> int -> unit
val tile : mat -> int array -> mat
val repeat : mat -> int array -> mat
val pad : ?v:elt -> int list list -> mat -> mat
val dropout : ?rate:float -> mat -> mat
val top : mat -> int -> int array array
val bottom : mat -> int -> int array array
val sort : mat -> mat
val argsort : - mat -> - (int64, Stdlib.Bigarray.int64_elt, Stdlib.Bigarray.c_layout) - Stdlib.Bigarray.Genarray.t
Iterate elements, columns, and rows.
val iteri : (int -> elt -> unit) -> mat -> unit
val iter : (elt -> unit) -> mat -> unit
val mapi : (int -> elt -> elt) -> mat -> mat
val map : (elt -> elt) -> mat -> mat
val foldi : ?axis:int -> (int -> elt -> elt -> elt) -> elt -> mat -> mat
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> mat -> mat
val scani : ?axis:int -> (int -> elt -> elt -> elt) -> mat -> mat
val scan : ?axis:int -> (elt -> elt -> elt) -> mat -> mat
val filteri : (int -> elt -> bool) -> mat -> int array
val filter : (elt -> bool) -> mat -> int array
val iteri_2d : (int -> int -> elt -> unit) -> mat -> unit
val mapi_2d : (int -> int -> elt -> elt) -> mat -> mat
val foldi_2d : - ?axis:int -> - (int -> int -> elt -> elt -> elt) -> - elt -> - mat -> - mat
val scani_2d : ?axis:int -> (int -> int -> elt -> elt -> elt) -> mat -> mat
val filteri_2d : (int -> int -> elt -> bool) -> mat -> (int * int) array
val iter2i_2d : (int -> int -> elt -> elt -> unit) -> mat -> mat -> unit
val map2i_2d : (int -> int -> elt -> elt -> elt) -> mat -> mat -> mat
val iter2i : (int -> elt -> elt -> unit) -> mat -> mat -> unit
val iter2 : (elt -> elt -> unit) -> mat -> mat -> unit
val map2i : (int -> elt -> elt -> elt) -> mat -> mat -> mat
val map2 : (elt -> elt -> elt) -> mat -> mat -> mat
val iteri_rows : (int -> mat -> unit) -> mat -> unit
val iter_rows : (mat -> unit) -> mat -> unit
val iter2i_rows : (int -> mat -> mat -> unit) -> mat -> mat -> unit
val iter2_rows : (mat -> mat -> unit) -> mat -> mat -> unit
val iteri_cols : (int -> mat -> unit) -> mat -> unit
val iter_cols : (mat -> unit) -> mat -> unit
val filteri_rows : (int -> mat -> bool) -> mat -> int array
val filter_rows : (mat -> bool) -> mat -> int array
val filteri_cols : (int -> mat -> bool) -> mat -> int array
val filter_cols : (mat -> bool) -> mat -> int array
val fold_rows : ('a -> mat -> 'a) -> 'a -> mat -> 'a
val fold_cols : ('a -> mat -> 'a) -> 'a -> mat -> 'a
val mapi_rows : (int -> mat -> 'a) -> mat -> 'a array
val map_rows : (mat -> 'a) -> mat -> 'a array
val mapi_cols : (int -> mat -> 'a) -> mat -> 'a array
val map_cols : (mat -> 'a) -> mat -> 'a array
val mapi_by_row : int -> (int -> mat -> mat) -> mat -> mat
val map_by_row : int -> (mat -> mat) -> mat -> mat
val mapi_by_col : int -> (int -> mat -> mat) -> mat -> mat
val map_by_col : int -> (mat -> mat) -> mat -> mat
val mapi_at_row : (int -> elt -> elt) -> mat -> int -> mat
val map_at_row : (elt -> elt) -> mat -> int -> mat
val mapi_at_col : (int -> elt -> elt) -> mat -> int -> mat
val map_at_col : (elt -> elt) -> mat -> int -> mat
Examine elements and compare two matrices
val exists : (elt -> bool) -> mat -> bool
val not_exists : (elt -> bool) -> mat -> bool
val for_all : (elt -> bool) -> mat -> bool
val is_zero : mat -> bool
val is_positive : mat -> bool
val is_negative : mat -> bool
val is_nonpositive : mat -> bool
val is_nonnegative : mat -> bool
val is_normal : mat -> bool
val not_nan : mat -> bool
val not_inf : mat -> bool
val equal : mat -> mat -> bool
val not_equal : mat -> mat -> bool
val greater : mat -> mat -> bool
val less : mat -> mat -> bool
val greater_equal : mat -> mat -> bool
val less_equal : mat -> mat -> bool
val elt_equal : mat -> mat -> mat
val elt_not_equal : mat -> mat -> mat
val elt_less : mat -> mat -> mat
val elt_greater : mat -> mat -> mat
val elt_less_equal : mat -> mat -> mat
val elt_greater_equal : mat -> mat -> mat
val equal_scalar : mat -> elt -> bool
val not_equal_scalar : mat -> elt -> bool
val less_scalar : mat -> elt -> bool
val greater_scalar : mat -> elt -> bool
val less_equal_scalar : mat -> elt -> bool
val greater_equal_scalar : mat -> elt -> bool
val elt_equal_scalar : mat -> elt -> mat
val elt_not_equal_scalar : mat -> elt -> mat
val elt_less_scalar : mat -> elt -> mat
val elt_greater_scalar : mat -> elt -> mat
val elt_less_equal_scalar : mat -> elt -> mat
val elt_greater_equal_scalar : mat -> elt -> mat
val approx_equal : ?eps:float -> mat -> mat -> bool
val approx_equal_scalar : ?eps:float -> mat -> elt -> bool
val approx_elt_equal : ?eps:float -> mat -> mat -> mat
val approx_elt_equal_scalar : ?eps:float -> mat -> elt -> mat
Randomisation functions
val draw_rows : ?replacement:bool -> mat -> int -> mat * int array
val draw_cols : ?replacement:bool -> mat -> int -> mat * int array
val draw_rows2 : - ?replacement:bool -> - mat -> - mat -> - int -> - mat * mat * int array
val draw_cols2 : - ?replacement:bool -> - mat -> - mat -> - int -> - mat * mat * int array
val shuffle_rows : mat -> mat
val shuffle_cols : mat -> mat
val shuffle : mat -> mat
Input/Output and helper functions
val to_array : mat -> elt array
val of_array : elt array -> int -> int -> mat
val to_arrays : mat -> elt array array
val of_arrays : elt array array -> mat
val to_rows : mat -> mat array
val of_rows : mat array -> mat
val to_cols : mat -> mat array
val of_cols : mat array -> mat
val print : - ?max_row:int -> - ?max_col:int -> - ?header:bool -> - ?fmt:(elt -> string) -> - mat -> - unit
val save : out:string -> mat -> unit
val load : string -> mat
val save_txt : ?sep:string -> ?append:bool -> out:string -> mat -> unit
val load_txt : ?sep:string -> string -> mat
val save_npy : out:string -> mat -> unit
val load_npy : string -> mat
Unary mathematical operations
val min : ?axis:int -> ?keep_dims:bool -> mat -> mat
val min' : mat -> elt
val max : ?axis:int -> ?keep_dims:bool -> mat -> mat
val max' : mat -> elt
val minmax : ?axis:int -> ?keep_dims:bool -> mat -> mat * mat
val minmax' : mat -> elt * elt
val min_i : mat -> elt * int array
val max_i : mat -> elt * int array
val minmax_i : mat -> (elt * int array) * (elt * int array)
val trace : mat -> elt
val sum : ?axis:int -> ?keep_dims:bool -> mat -> mat
val sum' : mat -> elt
val prod : ?axis:int -> ?keep_dims:bool -> mat -> mat
val prod' : mat -> elt
val mean : ?axis:int -> ?keep_dims:bool -> mat -> mat
val mean' : mat -> elt
val var' : mat -> elt
val std' : mat -> elt
val sem : ?axis:int -> ?keep_dims:bool -> mat -> mat
val sem' : mat -> elt
val sum_rows : ?keep_dims:bool -> mat -> mat
val sum_cols : ?keep_dims:bool -> mat -> mat
val mean_rows : ?keep_dims:bool -> mat -> mat
val mean_cols : ?keep_dims:bool -> mat -> mat
val abs : mat -> mat
val abs2 : mat -> mat
val conj : mat -> mat
val neg : mat -> mat
val reci : mat -> mat
val reci_tol : ?tol:elt -> mat -> mat
val sqr : mat -> mat
val sqrt : mat -> mat
val cbrt : mat -> mat
val exp : mat -> mat
val exp2 : mat -> mat
val exp10 : mat -> mat
val expm1 : mat -> mat
val log : mat -> mat
val log10 : mat -> mat
val log2 : mat -> mat
val log1p : mat -> mat
val sin : mat -> mat
val cos : mat -> mat
val tan : mat -> mat
val asin : mat -> mat
val acos : mat -> mat
val atan : mat -> mat
val sinh : mat -> mat
val cosh : mat -> mat
val tanh : mat -> mat
val asinh : mat -> mat
val acosh : mat -> mat
val atanh : mat -> mat
val floor : mat -> mat
val ceil : mat -> mat
val round : mat -> mat
val trunc : mat -> mat
val fix : mat -> mat
val modf : mat -> mat * mat
val l1norm : ?axis:int -> ?keep_dims:bool -> mat -> mat
val l1norm' : mat -> elt
val l2norm : ?axis:int -> ?keep_dims:bool -> mat -> mat
val l2norm' : mat -> elt
val l2norm_sqr : ?axis:int -> ?keep_dims:bool -> mat -> mat
val l2norm_sqr' : mat -> elt
val vecnorm : ?axis:int -> ?p:float -> ?keep_dims:bool -> mat -> mat
val vecnorm' : ?p:float -> mat -> elt
val cumsum : ?axis:int -> mat -> mat
val cumprod : ?axis:int -> mat -> mat
val cummin : ?axis:int -> mat -> mat
val cummax : ?axis:int -> mat -> mat
val diff : ?axis:int -> ?n:int -> mat -> mat
val var : ?axis:int -> ?keep_dims:bool -> mat -> mat
val std : ?axis:int -> ?keep_dims:bool -> mat -> mat
val mat2gray : ?amin:elt -> ?amax:elt -> mat -> mat
val lgamma : mat -> mat
val dawsn : mat -> mat
Binary mathematical operations
val add : mat -> mat -> mat
val sub : mat -> mat -> mat
val mul : mat -> mat -> mat
val div : mat -> mat -> mat
val add_scalar : mat -> elt -> mat
val sub_scalar : mat -> elt -> mat
val mul_scalar : mat -> elt -> mat
val div_scalar : mat -> elt -> mat
val scalar_add : elt -> mat -> mat
val scalar_sub : elt -> mat -> mat
val scalar_mul : elt -> mat -> mat
val scalar_div : elt -> mat -> mat
val dot : mat -> mat -> mat
val add_diag : mat -> elt -> mat
val pow : mat -> mat -> mat
val scalar_pow : elt -> mat -> mat
val pow_scalar : mat -> elt -> mat
val min2 : mat -> mat -> mat
val max2 : mat -> mat -> mat
val ssqr' : mat -> elt -> elt
val ssqr_diff' : mat -> mat -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> mat -> mat
val cov : ?b:mat -> a:mat -> mat
val kron : mat -> mat -> mat
val fma : mat -> mat -> mat -> mat
Functions of in-place modification
val create_ : out:mat -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:mat -> unit
val bernoulli_ : ?p:float -> out:mat -> unit
val zeros_ : out:mat -> unit
val ones_ : out:mat -> unit
val sort_ : mat -> unit
val one_hot_ : out:mat -> int -> mat -> unit
val copy_ : out:mat -> mat -> unit
val reshape_ : out:mat -> mat -> unit
val transpose_ : out:mat -> ?axis:int array -> mat -> unit
val sum_ : out:mat -> axis:int -> mat -> unit
val min_ : out:mat -> axis:int -> mat -> unit
val max_ : out:mat -> axis:int -> mat -> unit
val add_ : ?out:mat -> mat -> mat -> unit
val sub_ : ?out:mat -> mat -> mat -> unit
val mul_ : ?out:mat -> mat -> mat -> unit
val div_ : ?out:mat -> mat -> mat -> unit
val pow_ : ?out:mat -> mat -> mat -> unit
val atan2_ : ?out:mat -> mat -> mat -> unit
val hypot_ : ?out:mat -> mat -> mat -> unit
val fmod_ : ?out:mat -> mat -> mat -> unit
val min2_ : ?out:mat -> mat -> mat -> unit
val max2_ : ?out:mat -> mat -> mat -> unit
val add_scalar_ : ?out:mat -> mat -> elt -> unit
val sub_scalar_ : ?out:mat -> mat -> elt -> unit
val mul_scalar_ : ?out:mat -> mat -> elt -> unit
val div_scalar_ : ?out:mat -> mat -> elt -> unit
val pow_scalar_ : ?out:mat -> mat -> elt -> unit
val atan2_scalar_ : ?out:mat -> mat -> elt -> unit
val fmod_scalar_ : ?out:mat -> mat -> elt -> unit
val scalar_add_ : ?out:mat -> elt -> mat -> unit
val scalar_sub_ : ?out:mat -> elt -> mat -> unit
val scalar_mul_ : ?out:mat -> elt -> mat -> unit
val scalar_div_ : ?out:mat -> elt -> mat -> unit
val scalar_pow_ : ?out:mat -> elt -> mat -> unit
val scalar_atan2_ : ?out:mat -> elt -> mat -> unit
val scalar_fmod_ : ?out:mat -> elt -> mat -> unit
val fma_ : ?out:mat -> mat -> mat -> mat -> unit
val dot_ : - ?transa:bool -> - ?transb:bool -> - ?alpha:elt -> - ?beta:elt -> - c:mat -> - mat -> - mat -> - unit
val conj_ : ?out:mat -> mat -> unit
val abs_ : ?out:mat -> mat -> unit
val neg_ : ?out:mat -> mat -> unit
val reci_ : ?out:mat -> mat -> unit
val signum_ : ?out:mat -> mat -> unit
val sqr_ : ?out:mat -> mat -> unit
val sqrt_ : ?out:mat -> mat -> unit
val cbrt_ : ?out:mat -> mat -> unit
val exp_ : ?out:mat -> mat -> unit
val exp2_ : ?out:mat -> mat -> unit
val exp10_ : ?out:mat -> mat -> unit
val expm1_ : ?out:mat -> mat -> unit
val log_ : ?out:mat -> mat -> unit
val log2_ : ?out:mat -> mat -> unit
val log10_ : ?out:mat -> mat -> unit
val log1p_ : ?out:mat -> mat -> unit
val sin_ : ?out:mat -> mat -> unit
val cos_ : ?out:mat -> mat -> unit
val tan_ : ?out:mat -> mat -> unit
val asin_ : ?out:mat -> mat -> unit
val acos_ : ?out:mat -> mat -> unit
val atan_ : ?out:mat -> mat -> unit
val sinh_ : ?out:mat -> mat -> unit
val cosh_ : ?out:mat -> mat -> unit
val tanh_ : ?out:mat -> mat -> unit
val asinh_ : ?out:mat -> mat -> unit
val acosh_ : ?out:mat -> mat -> unit
val atanh_ : ?out:mat -> mat -> unit
val floor_ : ?out:mat -> mat -> unit
val ceil_ : ?out:mat -> mat -> unit
val round_ : ?out:mat -> mat -> unit
val trunc_ : ?out:mat -> mat -> unit
val fix_ : ?out:mat -> mat -> unit
val erf_ : ?out:mat -> mat -> unit
val erfc_ : ?out:mat -> mat -> unit
val relu_ : ?out:mat -> mat -> unit
val softplus_ : ?out:mat -> mat -> unit
val softsign_ : ?out:mat -> mat -> unit
val sigmoid_ : ?out:mat -> mat -> unit
val softmax_ : ?out:mat -> ?axis:int -> mat -> unit
val cumsum_ : ?out:mat -> ?axis:int -> mat -> unit
val cumprod_ : ?out:mat -> ?axis:int -> mat -> unit
val cummin_ : ?out:mat -> ?axis:int -> mat -> unit
val cummax_ : ?out:mat -> ?axis:int -> mat -> unit
val dropout_ : ?out:mat -> ?rate:float -> mat -> unit
val elt_equal_ : ?out:mat -> mat -> mat -> unit
val elt_not_equal_ : ?out:mat -> mat -> mat -> unit
val elt_less_ : ?out:mat -> mat -> mat -> unit
val elt_greater_ : ?out:mat -> mat -> mat -> unit
val elt_less_equal_ : ?out:mat -> mat -> mat -> unit
val elt_greater_equal_ : ?out:mat -> mat -> mat -> unit
val elt_equal_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_not_equal_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_less_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_greater_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_less_equal_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_greater_equal_scalar_ : ?out:mat -> mat -> elt -> unit
include Owl_dense_matrix_intf.Real with type elt := elt and type mat := mat
Specific real functions
val i0 : mat -> mat
val i0e : mat -> mat
val i1 : mat -> mat
val i1e : mat -> mat
val iv : v:mat -> mat -> mat
val scalar_iv : v:elt -> mat -> mat
val iv_scalar : v:mat -> elt -> mat
val j0 : mat -> mat
val j1 : mat -> mat
val jv : v:mat -> mat -> mat
val scalar_jv : v:elt -> mat -> mat
val jv_scalar : v:mat -> elt -> mat
val semidef : int -> mat
val min_rows : mat -> (elt * int * int) array
val min_cols : mat -> (elt * int * int) array
val max_rows : mat -> (elt * int * int) array
val max_cols : mat -> (elt * int * int) array
val signum : mat -> mat
val erf : mat -> mat
val erfc : mat -> mat
val logistic : mat -> mat
val relu : mat -> mat
val elu : ?alpha:elt -> mat -> mat
val leaky_relu : ?alpha:elt -> mat -> mat
val softplus : mat -> mat
val softsign : mat -> mat
val softmax : ?axis:int -> mat -> mat
val sigmoid : mat -> mat
val log_sum_exp' : mat -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> mat -> mat
val max_pool : - ?padding:Owl_types.padding -> - mat -> - int array -> - int array -> - mat
val avg_pool : - ?padding:Owl_types.padding -> - mat -> - int array -> - int array -> - mat
val atan2 : mat -> mat -> mat
val scalar_atan2 : elt -> mat -> mat
val atan2_scalar : mat -> elt -> mat
val hypot : mat -> mat -> mat
val fmod : mat -> mat -> mat
val fmod_scalar : mat -> elt -> mat
val scalar_fmod : elt -> mat -> mat
val cross_entropy' : mat -> mat -> elt
val clip_by_l2norm : elt -> mat -> mat
val poisson : mu:elt -> int -> int -> mat
include module type of struct include Operator end
include sig ... end
val (+$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (-$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (*$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (/$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val ($+) : - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val ($-) : - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val ($*) : - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val ($/) : - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (=) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - bool
val (!=) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - bool
val (<>) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - bool
val (>) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - bool
val (<) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - bool
val (>=) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - bool
val (<=) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - bool
include sig ... end
val (=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (!=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (<>$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (<$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (>$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (<=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (>=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (=.) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (!=.) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (<>.) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (<.) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (>.) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (<=.) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (>=.) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (=.$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (!=.$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (<>.$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (<.$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (>.$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (<=.$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (>=.$) : - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (=~) : - ?eps:float -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - bool
val (=~$) : ?eps:float -> ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (=~.) : - ?eps:float -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (=~.$) : - ?eps:float -> - ('a, 'b) Owl_dense_matrix_generic.t -> - 'a -> - ('a, 'b) Owl_dense_matrix_generic.t
val (%) : - (float, 'a) Owl_dense_matrix_generic.t -> - (float, 'a) Owl_dense_matrix_generic.t -> - (float, 'a) Owl_dense_matrix_generic.t
val (%$) : - (float, 'a) Owl_dense_matrix_generic.t -> - float -> - (float, 'a) Owl_dense_matrix_generic.t
val (**) : - (float, 'a) Owl_dense_matrix_generic.t -> - (float, 'a) Owl_dense_matrix_generic.t -> - (float, 'a) Owl_dense_matrix_generic.t
val ($**) : - float -> - (float, 'a) Owl_dense_matrix_generic.t -> - (float, 'a) Owl_dense_matrix_generic.t
val (**$) : - (float, 'a) Owl_dense_matrix_generic.t -> - float -> - (float, 'a) Owl_dense_matrix_generic.t
val (+=) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - unit
val (-=) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - unit
val (*=) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - unit
val (/=) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - unit
val (+$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (-$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (*$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (/$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (@=) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (@||) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (.!{;..}) : - ('a, 'b) Owl_dense_matrix_generic.t -> - Owl_types.index array -> - ('a, 'b) Owl_dense_matrix_generic.t
val (.!{;..}<-) : - ('a, 'b) Owl_dense_matrix_generic.t -> - Owl_types.index array -> - ('a, 'b) Owl_dense_matrix_generic.t -> - unit
val (.${}) : - ('a, 'b) Owl_dense_matrix_generic.t -> - int list -> - ('a, 'b) Owl_dense_matrix_generic.t
val (.${;..}) : - ('a, 'b) Owl_dense_matrix_generic.t -> - int list array -> - ('a, 'b) Owl_dense_matrix_generic.t
val (.${}<-) : - ('a, 'b) Owl_dense_matrix_generic.t -> - int list -> - ('a, 'b) Owl_dense_matrix_generic.t -> - unit
val (.${;..}<-) : - ('a, 'b) Owl_dense_matrix_generic.t -> - int list array -> - ('a, 'b) Owl_dense_matrix_generic.t -> - unit
include sig ... end
val (*@) : - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t -> - ('a, 'b) Owl_dense_matrix_generic.t
val (.%{}) : ('a, 'b) Owl_dense_matrix_generic.t -> (int * int) -> 'a
val (.%{;..}) : ('a, 'b) Owl_dense_matrix_generic.t -> int array -> 'a
val (.%{}<-) : ('a, 'b) Owl_dense_matrix_generic.t -> (int * int) -> 'a -> unit
val (.%{;..}<-) : - ('a, 'b) Owl_dense_matrix_generic.t -> - int array -> - 'a -> - unit
include sig ... end
val (**@) : - ('a, 'b) Owl_linalg_generic.t -> - float -> - ('a, 'b) Owl_linalg_generic.t
val (/@) : - ('a, 'b) Owl_linalg_generic.t -> - ('a, 'b) Owl_linalg_generic.t -> - ('a, 'b) Owl_linalg_generic.t
val mpow : Owl_linalg_s.mat -> float -> Owl_linalg_s.mat
val diag : - ?k:int -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t
\ No newline at end of file diff --git a/owl/Owl_dense_matrix/Z/index.html b/owl/Owl_dense_matrix/Z/index.html deleted file mode 100644 index 3d0c69521..000000000 --- a/owl/Owl_dense_matrix/Z/index.html +++ /dev/null @@ -1,215 +0,0 @@ - -Z (owl.Owl_dense_matrix.Z)

Module Owl_dense_matrix.Z

include module type of struct include Owl_dense_matrix_z end
type elt = Stdlib.Complex.t
type mat = - (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt) Owl_dense_matrix_generic.t
type cast_mat = (float, Stdlib.Bigarray.float64_elt) Owl_dense_matrix_generic.t
include Owl_dense_matrix_intf.Common with type elt := elt and type mat := mat
Create dense matrices
val empty : int -> int -> mat
val create : int -> int -> elt -> mat
val init : int -> int -> (int -> elt) -> mat
val init_2d : int -> int -> (int -> int -> elt) -> mat
val zeros : int -> int -> mat
val ones : int -> int -> mat
val eye : int -> mat
val sequential : ?a:elt -> ?step:elt -> int -> int -> mat
val uniform : ?a:elt -> ?b:elt -> int -> int -> mat
val gaussian : ?mu:elt -> ?sigma:elt -> int -> int -> mat
val bernoulli : ?p:float -> int -> int -> mat
val unit_basis : int -> int -> mat
val diagm : ?k:int -> mat -> mat
val triu : ?k:int -> mat -> mat
val tril : ?k:int -> mat -> mat
val symmetric : ?upper:bool -> mat -> mat
val bidiagonal : ?upper:bool -> mat -> mat -> mat
val toeplitz : ?c:mat -> mat -> mat
val hankel : ?r:mat -> mat -> mat
val hadamard : int -> mat
val magic : int -> mat
Dense row vectors and meshgrids
val vector : int -> mat
val vector_zeros : int -> mat
val vector_ones : int -> mat
val vector_uniform : int -> mat
val linspace : elt -> elt -> int -> mat
val logspace : ?base:float -> elt -> elt -> int -> mat
val meshgrid : elt -> elt -> elt -> elt -> int -> int -> mat * mat
val meshup : mat -> mat -> mat * mat
Obtain the basic properties of a matrix
val shape : mat -> int * int
val row_num : mat -> int
val col_num : mat -> int
val numel : mat -> int
val nnz : mat -> int
val density : mat -> float
val size_in_bytes : mat -> int
val same_shape : mat -> mat -> bool
val same_data : mat -> mat -> bool
Manipulate a matrix
val get : mat -> int -> int -> elt
val set : mat -> int -> int -> elt -> unit
val get_index : mat -> int array array -> elt array
val set_index : mat -> int array array -> elt array -> unit
val get_fancy : Owl_types.index list -> mat -> mat
val set_fancy : Owl_types.index list -> mat -> mat -> unit
val get_slice : int list list -> mat -> mat
val set_slice : int list list -> mat -> mat -> unit
val row : mat -> int -> mat
val col : mat -> int -> mat
val rows : mat -> int array -> mat
val cols : mat -> int array -> mat
val resize : ?head:bool -> mat -> int array -> mat
val reshape : mat -> int array -> mat
val flatten : mat -> mat
val reverse : mat -> mat
val flip : ?axis:int -> mat -> mat
val rotate : mat -> int -> mat
val reset : mat -> unit
val fill : mat -> elt -> unit
val copy : mat -> mat
val copy_row_to : mat -> mat -> int -> unit
val copy_col_to : mat -> mat -> int -> unit
val concat_vertical : mat -> mat -> mat
val concat_horizontal : mat -> mat -> mat
val concat_vh : mat array array -> mat
val concatenate : ?axis:int -> mat array -> mat
val split : ?axis:int -> int array -> mat -> mat array
val split_vh : (int * int) array array -> mat -> mat array array
val transpose : mat -> mat
val ctranspose : mat -> mat
val diag : ?k:int -> mat -> mat
val swap_rows : mat -> int -> int -> unit
val swap_cols : mat -> int -> int -> unit
val tile : mat -> int array -> mat
val repeat : mat -> int array -> mat
val pad : ?v:elt -> int list list -> mat -> mat
val dropout : ?rate:float -> mat -> mat
val top : mat -> int -> int array array
val bottom : mat -> int -> int array array
val sort : mat -> mat
val argsort : mat -> (int64, Stdlib.Bigarray.int64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
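
A short, hedged example of the manipulation functions above (row, get_slice, transpose), again under the assumed Owl.Dense.Matrix.Z alias; the int list list argument follows the slice convention of get_slice.

module M = Owl.Dense.Matrix.Z

let () =
  let x = M.sequential 4 4 in                       (* consecutive values laid out row by row *)
  let r = M.row x 1 in                              (* second row as a 1x4 matrix *)
  let s = M.get_slice [ [ 0; 1 ]; [ 2; 3 ] ] x in   (* rows 0..1, columns 2..3 *)
  let t = M.transpose x in
  M.print r; M.print s; M.print t
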
Iterate elements, columns, and rows.
val iteri : (int -> elt -> unit) -> mat -> unit
val iter : (elt -> unit) -> mat -> unit
val mapi : (int -> elt -> elt) -> mat -> mat
val map : (elt -> elt) -> mat -> mat
val foldi : ?axis:int -> (int -> elt -> elt -> elt) -> elt -> mat -> mat
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> mat -> mat
val scani : ?axis:int -> (int -> elt -> elt -> elt) -> mat -> mat
val scan : ?axis:int -> (elt -> elt -> elt) -> mat -> mat
val filteri : (int -> elt -> bool) -> mat -> int array
val filter : (elt -> bool) -> mat -> int array
val iteri_2d : (int -> int -> elt -> unit) -> mat -> unit
val mapi_2d : (int -> int -> elt -> elt) -> mat -> mat
val foldi_2d : ?axis:int -> (int -> int -> elt -> elt -> elt) -> elt -> mat -> mat
val scani_2d : ?axis:int -> (int -> int -> elt -> elt -> elt) -> mat -> mat
val filteri_2d : (int -> int -> elt -> bool) -> mat -> (int * int) array
val iter2i_2d : (int -> int -> elt -> elt -> unit) -> mat -> mat -> unit
val map2i_2d : (int -> int -> elt -> elt -> elt) -> mat -> mat -> mat
val iter2i : (int -> elt -> elt -> unit) -> mat -> mat -> unit
val iter2 : (elt -> elt -> unit) -> mat -> mat -> unit
val map2i : (int -> elt -> elt -> elt) -> mat -> mat -> mat
val map2 : (elt -> elt -> elt) -> mat -> mat -> mat
val iteri_rows : (int -> mat -> unit) -> mat -> unit
val iter_rows : (mat -> unit) -> mat -> unit
val iter2i_rows : (int -> mat -> mat -> unit) -> mat -> mat -> unit
val iter2_rows : (mat -> mat -> unit) -> mat -> mat -> unit
val iteri_cols : (int -> mat -> unit) -> mat -> unit
val iter_cols : (mat -> unit) -> mat -> unit
val filteri_rows : (int -> mat -> bool) -> mat -> int array
val filter_rows : (mat -> bool) -> mat -> int array
val filteri_cols : (int -> mat -> bool) -> mat -> int array
val filter_cols : (mat -> bool) -> mat -> int array
val fold_rows : ('a -> mat -> 'a) -> 'a -> mat -> 'a
val fold_cols : ('a -> mat -> 'a) -> 'a -> mat -> 'a
val mapi_rows : (int -> mat -> 'a) -> mat -> 'a array
val map_rows : (mat -> 'a) -> mat -> 'a array
val mapi_cols : (int -> mat -> 'a) -> mat -> 'a array
val map_cols : (mat -> 'a) -> mat -> 'a array
val mapi_by_row : int -> (int -> mat -> mat) -> mat -> mat
val map_by_row : int -> (mat -> mat) -> mat -> mat
val mapi_by_col : int -> (int -> mat -> mat) -> mat -> mat
val map_by_col : int -> (mat -> mat) -> mat -> mat
val mapi_at_row : (int -> elt -> elt) -> mat -> int -> mat
val map_at_row : (elt -> elt) -> mat -> int -> mat
val mapi_at_col : (int -> elt -> elt) -> mat -> int -> mat
val map_at_col : (elt -> elt) -> mat -> int -> mat
Examine elements and compare two matrices
val exists : (elt -> bool) -> mat -> bool
val not_exists : (elt -> bool) -> mat -> bool
val for_all : (elt -> bool) -> mat -> bool
val is_zero : mat -> bool
val is_positive : mat -> bool
val is_negative : mat -> bool
val is_nonpositive : mat -> bool
val is_nonnegative : mat -> bool
val is_normal : mat -> bool
val not_nan : mat -> bool
val not_inf : mat -> bool
val equal : mat -> mat -> bool
val not_equal : mat -> mat -> bool
val greater : mat -> mat -> bool
val less : mat -> mat -> bool
val greater_equal : mat -> mat -> bool
val less_equal : mat -> mat -> bool
val elt_equal : mat -> mat -> mat
val elt_not_equal : mat -> mat -> mat
val elt_less : mat -> mat -> mat
val elt_greater : mat -> mat -> mat
val elt_less_equal : mat -> mat -> mat
val elt_greater_equal : mat -> mat -> mat
val equal_scalar : mat -> elt -> bool
val not_equal_scalar : mat -> elt -> bool
val less_scalar : mat -> elt -> bool
val greater_scalar : mat -> elt -> bool
val less_equal_scalar : mat -> elt -> bool
val greater_equal_scalar : mat -> elt -> bool
val elt_equal_scalar : mat -> elt -> mat
val elt_not_equal_scalar : mat -> elt -> mat
val elt_less_scalar : mat -> elt -> mat
val elt_greater_scalar : mat -> elt -> mat
val elt_less_equal_scalar : mat -> elt -> mat
val elt_greater_equal_scalar : mat -> elt -> mat
val approx_equal : ?eps:float -> mat -> mat -> bool
val approx_equal_scalar : ?eps:float -> mat -> elt -> bool
val approx_elt_equal : ?eps:float -> mat -> mat -> mat
val approx_elt_equal_scalar : ?eps:float -> mat -> elt -> mat
Randomisation functions
val draw_rows : ?replacement:bool -> mat -> int -> mat * int array
val draw_cols : ?replacement:bool -> mat -> int -> mat * int array
val draw_rows2 : ?replacement:bool -> mat -> mat -> int -> mat * mat * int array
val draw_cols2 : ?replacement:bool -> mat -> mat -> int -> mat * mat * int array
val shuffle_rows : mat -> mat
val shuffle_cols : mat -> mat
val shuffle : mat -> mat
Input/Output and helper functions
val to_array : mat -> elt array
val of_array : elt array -> int -> int -> mat
val to_arrays : mat -> elt array array
val of_arrays : elt array array -> mat
val to_rows : mat -> mat array
val of_rows : mat array -> mat
val to_cols : mat -> mat array
val of_cols : mat array -> mat
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> mat -> unit
val save : out:string -> mat -> unit
val load : string -> mat
val save_txt : ?sep:string -> ?append:bool -> out:string -> mat -> unit
val load_txt : ?sep:string -> string -> mat
val save_npy : out:string -> mat -> unit
val load_npy : string -> mat
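
A hedged round-trip example for the I/O helpers above. The file name is a placeholder; save/load use Owl's native serialisation, while save_txt/load_txt and save_npy/load_npy target delimited text and NumPy files respectively.

module M = Owl.Dense.Matrix.Z

let () =
  let x = M.gaussian 3 3 in
  M.save ~out:"x.bin" x;                 (* placeholder file name *)
  let y = M.load "x.bin" in
  assert (M.equal x y);
  Printf.printf "rows read back: %d\n" (Array.length (M.to_arrays y))
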
Unary mathematical operations
val min : ?axis:int -> ?keep_dims:bool -> mat -> mat
val min' : mat -> elt
val max : ?axis:int -> ?keep_dims:bool -> mat -> mat
val max' : mat -> elt
val minmax : ?axis:int -> ?keep_dims:bool -> mat -> mat * mat
val minmax' : mat -> elt * elt
val min_i : mat -> elt * int array
val max_i : mat -> elt * int array
val minmax_i : mat -> (elt * int array) * (elt * int array)
val trace : mat -> elt
val sum : ?axis:int -> ?keep_dims:bool -> mat -> mat
val sum' : mat -> elt
val prod : ?axis:int -> ?keep_dims:bool -> mat -> mat
val prod' : mat -> elt
val mean : ?axis:int -> ?keep_dims:bool -> mat -> mat
val mean' : mat -> elt
val var' : mat -> elt
val std' : mat -> elt
val sem : ?axis:int -> ?keep_dims:bool -> mat -> mat
val sem' : mat -> elt
val sum_rows : ?keep_dims:bool -> mat -> mat
val sum_cols : ?keep_dims:bool -> mat -> mat
val mean_rows : ?keep_dims:bool -> mat -> mat
val mean_cols : ?keep_dims:bool -> mat -> mat
val abs : mat -> mat
val abs2 : mat -> mat
val conj : mat -> mat
val neg : mat -> mat
val reci : mat -> mat
val reci_tol : ?tol:elt -> mat -> mat
val sqr : mat -> mat
val sqrt : mat -> mat
val cbrt : mat -> mat
val exp : mat -> mat
val exp2 : mat -> mat
val exp10 : mat -> mat
val expm1 : mat -> mat
val log : mat -> mat
val log10 : mat -> mat
val log2 : mat -> mat
val log1p : mat -> mat
val sin : mat -> mat
val cos : mat -> mat
val tan : mat -> mat
val asin : mat -> mat
val acos : mat -> mat
val atan : mat -> mat
val sinh : mat -> mat
val cosh : mat -> mat
val tanh : mat -> mat
val asinh : mat -> mat
val acosh : mat -> mat
val atanh : mat -> mat
val floor : mat -> mat
val ceil : mat -> mat
val round : mat -> mat
val trunc : mat -> mat
val fix : mat -> mat
val modf : mat -> mat * mat
val l1norm : ?axis:int -> ?keep_dims:bool -> mat -> mat
val l1norm' : mat -> elt
val l2norm : ?axis:int -> ?keep_dims:bool -> mat -> mat
val l2norm' : mat -> elt
val l2norm_sqr : ?axis:int -> ?keep_dims:bool -> mat -> mat
val l2norm_sqr' : mat -> elt
val vecnorm : ?axis:int -> ?p:float -> ?keep_dims:bool -> mat -> mat
val vecnorm' : ?p:float -> mat -> elt
val cumsum : ?axis:int -> mat -> mat
val cumprod : ?axis:int -> mat -> mat
val cummin : ?axis:int -> mat -> mat
val cummax : ?axis:int -> mat -> mat
val diff : ?axis:int -> ?n:int -> mat -> mat
val var : ?axis:int -> ?keep_dims:bool -> mat -> mat
val std : ?axis:int -> ?keep_dims:bool -> mat -> mat
val mat2gray : ?amin:elt -> ?amax:elt -> mat -> mat
val lgamma : mat -> mat
val dawsn : mat -> mat
Binary mathematical operations
val add : mat -> mat -> mat
val sub : mat -> mat -> mat
val mul : mat -> mat -> mat
val div : mat -> mat -> mat
val add_scalar : mat -> elt -> mat
val sub_scalar : mat -> elt -> mat
val mul_scalar : mat -> elt -> mat
val div_scalar : mat -> elt -> mat
val scalar_add : elt -> mat -> mat
val scalar_sub : elt -> mat -> mat
val scalar_mul : elt -> mat -> mat
val scalar_div : elt -> mat -> mat
val dot : mat -> mat -> mat
val add_diag : mat -> elt -> mat
val pow : mat -> mat -> mat
val scalar_pow : elt -> mat -> mat
val pow_scalar : mat -> elt -> mat
val min2 : mat -> mat -> mat
val max2 : mat -> mat -> mat
val ssqr' : mat -> elt -> elt
val ssqr_diff' : mat -> mat -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> mat -> mat
val cov : ?b:mat -> a:mat -> mat
val kron : mat -> mat -> mat
val fma : mat -> mat -> mat -> mat
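
The binary operations above compose in the usual way; a small sketch (assumed Owl.Dense.Matrix.Z alias) combining dot, add and mul_scalar.

module M = Owl.Dense.Matrix.Z

let () =
  let a = M.uniform 2 3 in
  let b = M.uniform 3 2 in
  let c = M.dot a b in                                    (* 2x2 matrix product *)
  let d = M.add c (M.eye 2) in                            (* add the identity *)
  let e = M.mul_scalar d { Complex.re = 2.; im = 0. } in  (* scale every element *)
  M.print e
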
Functions of in-place modification
val create_ : out:mat -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:mat -> unit
val bernoulli_ : ?p:float -> out:mat -> unit
val zeros_ : out:mat -> unit
val ones_ : out:mat -> unit
val sort_ : mat -> unit
val one_hot_ : out:mat -> int -> mat -> unit
val copy_ : out:mat -> mat -> unit
val reshape_ : out:mat -> mat -> unit
val transpose_ : out:mat -> ?axis:int array -> mat -> unit
val sum_ : out:mat -> axis:int -> mat -> unit
val min_ : out:mat -> axis:int -> mat -> unit
val max_ : out:mat -> axis:int -> mat -> unit
val add_ : ?out:mat -> mat -> mat -> unit
val sub_ : ?out:mat -> mat -> mat -> unit
val mul_ : ?out:mat -> mat -> mat -> unit
val div_ : ?out:mat -> mat -> mat -> unit
val pow_ : ?out:mat -> mat -> mat -> unit
val atan2_ : ?out:mat -> mat -> mat -> unit
val hypot_ : ?out:mat -> mat -> mat -> unit
val fmod_ : ?out:mat -> mat -> mat -> unit
val min2_ : ?out:mat -> mat -> mat -> unit
val max2_ : ?out:mat -> mat -> mat -> unit
val add_scalar_ : ?out:mat -> mat -> elt -> unit
val sub_scalar_ : ?out:mat -> mat -> elt -> unit
val mul_scalar_ : ?out:mat -> mat -> elt -> unit
val div_scalar_ : ?out:mat -> mat -> elt -> unit
val pow_scalar_ : ?out:mat -> mat -> elt -> unit
val atan2_scalar_ : ?out:mat -> mat -> elt -> unit
val fmod_scalar_ : ?out:mat -> mat -> elt -> unit
val scalar_add_ : ?out:mat -> elt -> mat -> unit
val scalar_sub_ : ?out:mat -> elt -> mat -> unit
val scalar_mul_ : ?out:mat -> elt -> mat -> unit
val scalar_div_ : ?out:mat -> elt -> mat -> unit
val scalar_pow_ : ?out:mat -> elt -> mat -> unit
val scalar_atan2_ : ?out:mat -> elt -> mat -> unit
val scalar_fmod_ : ?out:mat -> elt -> mat -> unit
val fma_ : ?out:mat -> mat -> mat -> mat -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:mat -> mat -> mat -> unit
val conj_ : ?out:mat -> mat -> unit
val abs_ : ?out:mat -> mat -> unit
val neg_ : ?out:mat -> mat -> unit
val reci_ : ?out:mat -> mat -> unit
val signum_ : ?out:mat -> mat -> unit
val sqr_ : ?out:mat -> mat -> unit
val sqrt_ : ?out:mat -> mat -> unit
val cbrt_ : ?out:mat -> mat -> unit
val exp_ : ?out:mat -> mat -> unit
val exp2_ : ?out:mat -> mat -> unit
val exp10_ : ?out:mat -> mat -> unit
val expm1_ : ?out:mat -> mat -> unit
val log_ : ?out:mat -> mat -> unit
val log2_ : ?out:mat -> mat -> unit
val log10_ : ?out:mat -> mat -> unit
val log1p_ : ?out:mat -> mat -> unit
val sin_ : ?out:mat -> mat -> unit
val cos_ : ?out:mat -> mat -> unit
val tan_ : ?out:mat -> mat -> unit
val asin_ : ?out:mat -> mat -> unit
val acos_ : ?out:mat -> mat -> unit
val atan_ : ?out:mat -> mat -> unit
val sinh_ : ?out:mat -> mat -> unit
val cosh_ : ?out:mat -> mat -> unit
val tanh_ : ?out:mat -> mat -> unit
val asinh_ : ?out:mat -> mat -> unit
val acosh_ : ?out:mat -> mat -> unit
val atanh_ : ?out:mat -> mat -> unit
val floor_ : ?out:mat -> mat -> unit
val ceil_ : ?out:mat -> mat -> unit
val round_ : ?out:mat -> mat -> unit
val trunc_ : ?out:mat -> mat -> unit
val fix_ : ?out:mat -> mat -> unit
val erf_ : ?out:mat -> mat -> unit
val erfc_ : ?out:mat -> mat -> unit
val relu_ : ?out:mat -> mat -> unit
val softplus_ : ?out:mat -> mat -> unit
val softsign_ : ?out:mat -> mat -> unit
val sigmoid_ : ?out:mat -> mat -> unit
val softmax_ : ?out:mat -> ?axis:int -> mat -> unit
val cumsum_ : ?out:mat -> ?axis:int -> mat -> unit
val cumprod_ : ?out:mat -> ?axis:int -> mat -> unit
val cummin_ : ?out:mat -> ?axis:int -> mat -> unit
val cummax_ : ?out:mat -> ?axis:int -> mat -> unit
val dropout_ : ?out:mat -> ?rate:float -> mat -> unit
val elt_equal_ : ?out:mat -> mat -> mat -> unit
val elt_not_equal_ : ?out:mat -> mat -> mat -> unit
val elt_less_ : ?out:mat -> mat -> mat -> unit
val elt_greater_ : ?out:mat -> mat -> mat -> unit
val elt_less_equal_ : ?out:mat -> mat -> mat -> unit
val elt_greater_equal_ : ?out:mat -> mat -> mat -> unit
val elt_equal_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_not_equal_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_less_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_greater_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_less_equal_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_greater_equal_scalar_ : ?out:mat -> mat -> elt -> unit
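
The trailing-underscore functions above write their result into an existing matrix instead of allocating a new one. A hedged sketch (assumed Owl.Dense.Matrix.Z alias); that omitting ?out updates the first operand in place is the usual convention but is an assumption here, not something the signatures state.

module M = Owl.Dense.Matrix.Z

let () =
  let a = M.uniform 3 3 in
  let b = M.uniform 3 3 in
  let out = M.empty 3 3 in
  M.add_ ~out a b;                 (* out <- a + b, no fresh allocation *)
  M.add_scalar_ out Complex.one;   (* assumed: updates out in place when ?out is omitted *)
  M.print out
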
include Owl_dense_matrix_intf.Complex with type mat := mat and type cast_mat := cast_mat
Specific complex functions
val complex : cast_mat -> cast_mat -> mat
val polar : cast_mat -> cast_mat -> mat
val re : mat -> cast_mat
val im : mat -> cast_mat
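
complex, re and im convert between this complex matrix type and the real cast_mat type. A sketch assuming the usual aliases Owl.Dense.Matrix.Z (this module) and Owl.Dense.Matrix.D (the float64 matrices that play the role of cast_mat).

module M = Owl.Dense.Matrix.Z
module D = Owl.Dense.Matrix.D      (* assumed alias for the float64 cast_mat type *)

let () =
  let re_part = D.uniform 2 2 in
  let im_part = D.uniform 2 2 in
  let z = M.complex re_part im_part in   (* build a complex matrix from two real ones *)
  D.print (M.re z);                      (* real parts back as a float64 matrix *)
  D.print (M.im z)
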
include module type of struct include Operator end
include sig ... end
val (+$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (-$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (*$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (/$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val ($+) : 'a -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val ($-) : 'a -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val ($*) : 'a -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val ($/) : 'a -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (!=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (<>) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (>) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (<) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (>=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (<=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
include sig ... end
val (=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (!=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (<>$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (<$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (>$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (<=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (>=$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (=.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (!=.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (<>.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (<.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (>.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (<=.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (>=.) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (=.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (!=.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (<>.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (<.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (>.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (<=.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (>=.$) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (=~) : ?eps:float -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> bool
val (=~$) : ?eps:float -> ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> bool
val (=~.) : ?eps:float -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (=~.$) : ?eps:float -> ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> ('a, 'b) Owl_dense_matrix_generic.t
val (%) : (float, 'a) Owl_dense_matrix_generic.t -> (float, 'a) Owl_dense_matrix_generic.t -> (float, 'a) Owl_dense_matrix_generic.t
val (%$) : (float, 'a) Owl_dense_matrix_generic.t -> float -> (float, 'a) Owl_dense_matrix_generic.t
val (**) : (float, 'a) Owl_dense_matrix_generic.t -> (float, 'a) Owl_dense_matrix_generic.t -> (float, 'a) Owl_dense_matrix_generic.t
val ($**) : float -> (float, 'a) Owl_dense_matrix_generic.t -> (float, 'a) Owl_dense_matrix_generic.t
val (**$) : (float, 'a) Owl_dense_matrix_generic.t -> float -> (float, 'a) Owl_dense_matrix_generic.t
val (+=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
val (-=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
val (*=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
val (/=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
val (+$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (-$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (*$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (/$=) : ('a, 'b) Owl_dense_matrix_generic.t -> 'a -> unit
val (@=) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (@||) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (.!{;..}) : ('a, 'b) Owl_dense_matrix_generic.t -> Owl_types.index array -> ('a, 'b) Owl_dense_matrix_generic.t
val (.!{;..}<-) : ('a, 'b) Owl_dense_matrix_generic.t -> Owl_types.index array -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
val (.${}) : ('a, 'b) Owl_dense_matrix_generic.t -> int list -> ('a, 'b) Owl_dense_matrix_generic.t
val (.${;..}) : ('a, 'b) Owl_dense_matrix_generic.t -> int list array -> ('a, 'b) Owl_dense_matrix_generic.t
val (.${}<-) : ('a, 'b) Owl_dense_matrix_generic.t -> int list -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
val (.${;..}<-) : ('a, 'b) Owl_dense_matrix_generic.t -> int list array -> ('a, 'b) Owl_dense_matrix_generic.t -> unit
include sig ... end
val (*@) : ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t -> ('a, 'b) Owl_dense_matrix_generic.t
val (.%{}) : ('a, 'b) Owl_dense_matrix_generic.t -> (int * int) -> 'a
val (.%{;..}) : ('a, 'b) Owl_dense_matrix_generic.t -> int array -> 'a
val (.%{}<-) : ('a, 'b) Owl_dense_matrix_generic.t -> (int * int) -> 'a -> unit
val (.%{;..}<-) : ('a, 'b) Owl_dense_matrix_generic.t -> int array -> 'a -> unit
include sig ... end
val (**@) : ('a, 'b) Owl_linalg_generic.t -> float -> ('a, 'b) Owl_linalg_generic.t
val (/@) : ('a, 'b) Owl_linalg_generic.t -> ('a, 'b) Owl_linalg_generic.t -> ('a, 'b) Owl_linalg_generic.t
val mpow : Owl_linalg_z.mat -> float -> Owl_linalg_z.mat
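
The included Operator module supplies the infix and indexing operators listed above. A hedged sketch; opening the module to bring the operators into scope is a stylistic assumption (it also shadows the polymorphic comparison operators with the matrix versions shown above).

module M = Owl.Dense.Matrix.Z
open M   (* brings *@, +$, .%{} etc. into scope; also shadows =, <, ... for matrices *)

let () =
  let a = uniform 2 2 in
  let b = uniform 2 2 in
  let c = a *@ b in                    (* matrix product, i.e. dot *)
  let d = c +$ Complex.one in          (* add a scalar to every element *)
  let x = d.%{(0, 1)} in               (* element at row 0, column 1 *)
  Printf.printf "%g%+gi\n" x.Complex.re x.Complex.im
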
\ No newline at end of file diff --git a/owl/Owl_dense_matrix_c/.dummy b/owl/Owl_dense_matrix_c/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_dense_matrix_d/.dummy b/owl/Owl_dense_matrix_d/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_dense_matrix_generic/.dummy b/owl/Owl_dense_matrix_generic/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_dense_matrix_intf/.dummy b/owl/Owl_dense_matrix_intf/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_dense_matrix_intf/module-type-Common/index.html b/owl/Owl_dense_matrix_intf/module-type-Common/index.html deleted file mode 100644 index 2338f9ddd..000000000 --- a/owl/Owl_dense_matrix_intf/module-type-Common/index.html +++ /dev/null @@ -1,34 +0,0 @@ - -Common (owl.Owl_dense_matrix_intf.Common)

Module type Owl_dense_matrix_intf.Common

type elt
type mat
Create dense matrices
val empty : int -> int -> mat
val create : int -> int -> elt -> mat
val init : int -> int -> (int -> elt) -> mat
val init_2d : int -> int -> (int -> int -> elt) -> mat
val zeros : int -> int -> mat
val ones : int -> int -> mat
val eye : int -> mat
val sequential : ?a:elt -> ?step:elt -> int -> int -> mat
val uniform : ?a:elt -> ?b:elt -> int -> int -> mat
val gaussian : ?mu:elt -> ?sigma:elt -> int -> int -> mat
val bernoulli : ?p:float -> int -> int -> mat
val unit_basis : int -> int -> mat
val diagm : ?k:int -> mat -> mat
val triu : ?k:int -> mat -> mat
val tril : ?k:int -> mat -> mat
val symmetric : ?upper:bool -> mat -> mat
val bidiagonal : ?upper:bool -> mat -> mat -> mat
val toeplitz : ?c:mat -> mat -> mat
val hankel : ?r:mat -> mat -> mat
val hadamard : int -> mat
val magic : int -> mat
Dense row vectors and meshgrids
val vector : int -> mat
val vector_zeros : int -> mat
val vector_ones : int -> mat
val vector_uniform : int -> mat
val linspace : elt -> elt -> int -> mat
val logspace : ?base:float -> elt -> elt -> int -> mat
val meshgrid : elt -> elt -> elt -> elt -> int -> int -> mat * mat
val meshup : mat -> mat -> mat * mat
Obtain the basic properties of a matrix
val shape : mat -> int * int
val row_num : mat -> int
val col_num : mat -> int
val numel : mat -> int
val nnz : mat -> int
val density : mat -> float
val size_in_bytes : mat -> int
val same_shape : mat -> mat -> bool
val same_data : mat -> mat -> bool
Manipulate a matrix
val get : mat -> int -> int -> elt
val set : mat -> int -> int -> elt -> unit
val get_index : mat -> int array array -> elt array
val set_index : mat -> int array array -> elt array -> unit
val get_fancy : Owl_types.index list -> mat -> mat
val set_fancy : Owl_types.index list -> mat -> mat -> unit
val get_slice : int list list -> mat -> mat
val set_slice : int list list -> mat -> mat -> unit
val row : mat -> int -> mat
val col : mat -> int -> mat
val rows : mat -> int array -> mat
val cols : mat -> int array -> mat
val resize : ?head:bool -> mat -> int array -> mat
val reshape : mat -> int array -> mat
val flatten : mat -> mat
val reverse : mat -> mat
val flip : ?axis:int -> mat -> mat
val rotate : mat -> int -> mat
val reset : mat -> unit
val fill : mat -> elt -> unit
val copy : mat -> mat
val copy_row_to : mat -> mat -> int -> unit
val copy_col_to : mat -> mat -> int -> unit
val concat_vertical : mat -> mat -> mat
val concat_horizontal : mat -> mat -> mat
val concat_vh : mat array array -> mat
val concatenate : ?axis:int -> mat array -> mat
val split : ?axis:int -> int array -> mat -> mat array
val split_vh : (int * int) array array -> mat -> mat array array
val transpose : mat -> mat
val ctranspose : mat -> mat
val diag : ?k:int -> mat -> mat
val swap_rows : mat -> int -> int -> unit
val swap_cols : mat -> int -> int -> unit
val tile : mat -> int array -> mat
val repeat : mat -> int array -> mat
val pad : ?v:elt -> int list list -> mat -> mat
val dropout : ?rate:float -> mat -> mat
val top : mat -> int -> int array array
val bottom : mat -> int -> int array array
val sort : mat -> mat
val argsort : mat -> (int64, Stdlib.Bigarray.int64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
Iterate elements, columns, and rows.
val iteri : (int -> elt -> unit) -> mat -> unit
val iter : (elt -> unit) -> mat -> unit
val mapi : (int -> elt -> elt) -> mat -> mat
val map : (elt -> elt) -> mat -> mat
val foldi : ?axis:int -> (int -> elt -> elt -> elt) -> elt -> mat -> mat
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> mat -> mat
val scani : ?axis:int -> (int -> elt -> elt -> elt) -> mat -> mat
val scan : ?axis:int -> (elt -> elt -> elt) -> mat -> mat
val filteri : (int -> elt -> bool) -> mat -> int array
val filter : (elt -> bool) -> mat -> int array
val iteri_2d : (int -> int -> elt -> unit) -> mat -> unit
val mapi_2d : (int -> int -> elt -> elt) -> mat -> mat
val foldi_2d : ?axis:int -> (int -> int -> elt -> elt -> elt) -> elt -> mat -> mat
val scani_2d : ?axis:int -> (int -> int -> elt -> elt -> elt) -> mat -> mat
val filteri_2d : (int -> int -> elt -> bool) -> mat -> (int * int) array
val iter2i_2d : (int -> int -> elt -> elt -> unit) -> mat -> mat -> unit
val map2i_2d : (int -> int -> elt -> elt -> elt) -> mat -> mat -> mat
val iter2i : (int -> elt -> elt -> unit) -> mat -> mat -> unit
val iter2 : (elt -> elt -> unit) -> mat -> mat -> unit
val map2i : (int -> elt -> elt -> elt) -> mat -> mat -> mat
val map2 : (elt -> elt -> elt) -> mat -> mat -> mat
val iteri_rows : (int -> mat -> unit) -> mat -> unit
val iter_rows : (mat -> unit) -> mat -> unit
val iter2i_rows : (int -> mat -> mat -> unit) -> mat -> mat -> unit
val iter2_rows : (mat -> mat -> unit) -> mat -> mat -> unit
val iteri_cols : (int -> mat -> unit) -> mat -> unit
val iter_cols : (mat -> unit) -> mat -> unit
val filteri_rows : (int -> mat -> bool) -> mat -> int array
val filter_rows : (mat -> bool) -> mat -> int array
val filteri_cols : (int -> mat -> bool) -> mat -> int array
val filter_cols : (mat -> bool) -> mat -> int array
val fold_rows : ('a -> mat -> 'a) -> 'a -> mat -> 'a
val fold_cols : ('a -> mat -> 'a) -> 'a -> mat -> 'a
val mapi_rows : (int -> mat -> 'a) -> mat -> 'a array
val map_rows : (mat -> 'a) -> mat -> 'a array
val mapi_cols : (int -> mat -> 'a) -> mat -> 'a array
val map_cols : (mat -> 'a) -> mat -> 'a array
val mapi_by_row : int -> (int -> mat -> mat) -> mat -> mat
val map_by_row : int -> (mat -> mat) -> mat -> mat
val mapi_by_col : int -> (int -> mat -> mat) -> mat -> mat
val map_by_col : int -> (mat -> mat) -> mat -> mat
val mapi_at_row : (int -> elt -> elt) -> mat -> int -> mat
val map_at_row : (elt -> elt) -> mat -> int -> mat
val mapi_at_col : (int -> elt -> elt) -> mat -> int -> mat
val map_at_col : (elt -> elt) -> mat -> int -> mat
Examine elements and compare two matrices
val exists : (elt -> bool) -> mat -> bool
val not_exists : (elt -> bool) -> mat -> bool
val for_all : (elt -> bool) -> mat -> bool
val is_zero : mat -> bool
val is_positive : mat -> bool
val is_negative : mat -> bool
val is_nonpositive : mat -> bool
val is_nonnegative : mat -> bool
val is_normal : mat -> bool
val not_nan : mat -> bool
val not_inf : mat -> bool
val equal : mat -> mat -> bool
val not_equal : mat -> mat -> bool
val greater : mat -> mat -> bool
val less : mat -> mat -> bool
val greater_equal : mat -> mat -> bool
val less_equal : mat -> mat -> bool
val elt_equal : mat -> mat -> mat
val elt_not_equal : mat -> mat -> mat
val elt_less : mat -> mat -> mat
val elt_greater : mat -> mat -> mat
val elt_less_equal : mat -> mat -> mat
val elt_greater_equal : mat -> mat -> mat
val equal_scalar : mat -> elt -> bool
val not_equal_scalar : mat -> elt -> bool
val less_scalar : mat -> elt -> bool
val greater_scalar : mat -> elt -> bool
val less_equal_scalar : mat -> elt -> bool
val greater_equal_scalar : mat -> elt -> bool
val elt_equal_scalar : mat -> elt -> mat
val elt_not_equal_scalar : mat -> elt -> mat
val elt_less_scalar : mat -> elt -> mat
val elt_greater_scalar : mat -> elt -> mat
val elt_less_equal_scalar : mat -> elt -> mat
val elt_greater_equal_scalar : mat -> elt -> mat
val approx_equal : ?eps:float -> mat -> mat -> bool
val approx_equal_scalar : ?eps:float -> mat -> elt -> bool
val approx_elt_equal : ?eps:float -> mat -> mat -> mat
val approx_elt_equal_scalar : ?eps:float -> mat -> elt -> mat
Randomisation functions
val draw_rows : ?replacement:bool -> mat -> int -> mat * int array
val draw_cols : ?replacement:bool -> mat -> int -> mat * int array
val draw_rows2 : ?replacement:bool -> mat -> mat -> int -> mat * mat * int array
val draw_cols2 : ?replacement:bool -> mat -> mat -> int -> mat * mat * int array
val shuffle_rows : mat -> mat
val shuffle_cols : mat -> mat
val shuffle : mat -> mat
Input/Output and helper functions
val to_array : mat -> elt array
val of_array : elt array -> int -> int -> mat
val to_arrays : mat -> elt array array
val of_arrays : elt array array -> mat
val to_rows : mat -> mat array
val of_rows : mat array -> mat
val to_cols : mat -> mat array
val of_cols : mat array -> mat
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> mat -> unit
val save : out:string -> mat -> unit
val load : string -> mat
val save_txt : ?sep:string -> ?append:bool -> out:string -> mat -> unit
val load_txt : ?sep:string -> string -> mat
val save_npy : out:string -> mat -> unit
val load_npy : string -> mat
Unary mathematical operations
val min : ?axis:int -> ?keep_dims:bool -> mat -> mat
val min' : mat -> elt
val max : ?axis:int -> ?keep_dims:bool -> mat -> mat
val max' : mat -> elt
val minmax : ?axis:int -> ?keep_dims:bool -> mat -> mat * mat
val minmax' : mat -> elt * elt
val min_i : mat -> elt * int array
val max_i : mat -> elt * int array
val minmax_i : mat -> (elt * int array) * (elt * int array)
val trace : mat -> elt
val sum : ?axis:int -> ?keep_dims:bool -> mat -> mat
val sum' : mat -> elt
val prod : ?axis:int -> ?keep_dims:bool -> mat -> mat
val prod' : mat -> elt
val mean : ?axis:int -> ?keep_dims:bool -> mat -> mat
val mean' : mat -> elt
val var' : mat -> elt
val std' : mat -> elt
val sem : ?axis:int -> ?keep_dims:bool -> mat -> mat
val sem' : mat -> elt
val sum_rows : ?keep_dims:bool -> mat -> mat
val sum_cols : ?keep_dims:bool -> mat -> mat
val mean_rows : ?keep_dims:bool -> mat -> mat
val mean_cols : ?keep_dims:bool -> mat -> mat
val abs : mat -> mat
val abs2 : mat -> mat
val conj : mat -> mat
val neg : mat -> mat
val reci : mat -> mat
val reci_tol : ?tol:elt -> mat -> mat
val sqr : mat -> mat
val sqrt : mat -> mat
val cbrt : mat -> mat
val exp : mat -> mat
val exp2 : mat -> mat
val exp10 : mat -> mat
val expm1 : mat -> mat
val log : mat -> mat
val log10 : mat -> mat
val log2 : mat -> mat
val log1p : mat -> mat
val sin : mat -> mat
val cos : mat -> mat
val tan : mat -> mat
val asin : mat -> mat
val acos : mat -> mat
val atan : mat -> mat
val sinh : mat -> mat
val cosh : mat -> mat
val tanh : mat -> mat
val asinh : mat -> mat
val acosh : mat -> mat
val atanh : mat -> mat
val floor : mat -> mat
val ceil : mat -> mat
val round : mat -> mat
val trunc : mat -> mat
val fix : mat -> mat
val modf : mat -> mat * mat
val l1norm : ?axis:int -> ?keep_dims:bool -> mat -> mat
val l1norm' : mat -> elt
val l2norm : ?axis:int -> ?keep_dims:bool -> mat -> mat
val l2norm' : mat -> elt
val l2norm_sqr : ?axis:int -> ?keep_dims:bool -> mat -> mat
val l2norm_sqr' : mat -> elt
val vecnorm : ?axis:int -> ?p:float -> ?keep_dims:bool -> mat -> mat
val vecnorm' : ?p:float -> mat -> elt
val cumsum : ?axis:int -> mat -> mat
val cumprod : ?axis:int -> mat -> mat
val cummin : ?axis:int -> mat -> mat
val cummax : ?axis:int -> mat -> mat
val diff : ?axis:int -> ?n:int -> mat -> mat
val var : ?axis:int -> ?keep_dims:bool -> mat -> mat
val std : ?axis:int -> ?keep_dims:bool -> mat -> mat
val mat2gray : ?amin:elt -> ?amax:elt -> mat -> mat
val lgamma : mat -> mat
val dawsn : mat -> mat
Binary mathematical operations
val add : mat -> mat -> mat
val sub : mat -> mat -> mat
val mul : mat -> mat -> mat
val div : mat -> mat -> mat
val add_scalar : mat -> elt -> mat
val sub_scalar : mat -> elt -> mat
val mul_scalar : mat -> elt -> mat
val div_scalar : mat -> elt -> mat
val scalar_add : elt -> mat -> mat
val scalar_sub : elt -> mat -> mat
val scalar_mul : elt -> mat -> mat
val scalar_div : elt -> mat -> mat
val dot : mat -> mat -> mat
val add_diag : mat -> elt -> mat
val pow : mat -> mat -> mat
val scalar_pow : elt -> mat -> mat
val pow_scalar : mat -> elt -> mat
val min2 : mat -> mat -> mat
val max2 : mat -> mat -> mat
val ssqr' : mat -> elt -> elt
val ssqr_diff' : mat -> mat -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> mat -> mat
val cov : ?b:mat -> a:mat -> mat
val kron : mat -> mat -> mat
val fma : mat -> mat -> mat -> mat
Functions of in-place modification
val create_ : out:mat -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:mat -> unit
val bernoulli_ : ?p:float -> out:mat -> unit
val zeros_ : out:mat -> unit
val ones_ : out:mat -> unit
val sort_ : mat -> unit
val one_hot_ : out:mat -> int -> mat -> unit
val copy_ : out:mat -> mat -> unit
val reshape_ : out:mat -> mat -> unit
val transpose_ : out:mat -> ?axis:int array -> mat -> unit
val sum_ : out:mat -> axis:int -> mat -> unit
val min_ : out:mat -> axis:int -> mat -> unit
val max_ : out:mat -> axis:int -> mat -> unit
val add_ : ?out:mat -> mat -> mat -> unit
val sub_ : ?out:mat -> mat -> mat -> unit
val mul_ : ?out:mat -> mat -> mat -> unit
val div_ : ?out:mat -> mat -> mat -> unit
val pow_ : ?out:mat -> mat -> mat -> unit
val atan2_ : ?out:mat -> mat -> mat -> unit
val hypot_ : ?out:mat -> mat -> mat -> unit
val fmod_ : ?out:mat -> mat -> mat -> unit
val min2_ : ?out:mat -> mat -> mat -> unit
val max2_ : ?out:mat -> mat -> mat -> unit
val add_scalar_ : ?out:mat -> mat -> elt -> unit
val sub_scalar_ : ?out:mat -> mat -> elt -> unit
val mul_scalar_ : ?out:mat -> mat -> elt -> unit
val div_scalar_ : ?out:mat -> mat -> elt -> unit
val pow_scalar_ : ?out:mat -> mat -> elt -> unit
val atan2_scalar_ : ?out:mat -> mat -> elt -> unit
val fmod_scalar_ : ?out:mat -> mat -> elt -> unit
val scalar_add_ : ?out:mat -> elt -> mat -> unit
val scalar_sub_ : ?out:mat -> elt -> mat -> unit
val scalar_mul_ : ?out:mat -> elt -> mat -> unit
val scalar_div_ : ?out:mat -> elt -> mat -> unit
val scalar_pow_ : ?out:mat -> elt -> mat -> unit
val scalar_atan2_ : ?out:mat -> elt -> mat -> unit
val scalar_fmod_ : ?out:mat -> elt -> mat -> unit
val fma_ : ?out:mat -> mat -> mat -> mat -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:mat -> mat -> mat -> unit
val conj_ : ?out:mat -> mat -> unit
val abs_ : ?out:mat -> mat -> unit
val neg_ : ?out:mat -> mat -> unit
val reci_ : ?out:mat -> mat -> unit
val signum_ : ?out:mat -> mat -> unit
val sqr_ : ?out:mat -> mat -> unit
val sqrt_ : ?out:mat -> mat -> unit
val cbrt_ : ?out:mat -> mat -> unit
val exp_ : ?out:mat -> mat -> unit
val exp2_ : ?out:mat -> mat -> unit
val exp10_ : ?out:mat -> mat -> unit
val expm1_ : ?out:mat -> mat -> unit
val log_ : ?out:mat -> mat -> unit
val log2_ : ?out:mat -> mat -> unit
val log10_ : ?out:mat -> mat -> unit
val log1p_ : ?out:mat -> mat -> unit
val sin_ : ?out:mat -> mat -> unit
val cos_ : ?out:mat -> mat -> unit
val tan_ : ?out:mat -> mat -> unit
val asin_ : ?out:mat -> mat -> unit
val acos_ : ?out:mat -> mat -> unit
val atan_ : ?out:mat -> mat -> unit
val sinh_ : ?out:mat -> mat -> unit
val cosh_ : ?out:mat -> mat -> unit
val tanh_ : ?out:mat -> mat -> unit
val asinh_ : ?out:mat -> mat -> unit
val acosh_ : ?out:mat -> mat -> unit
val atanh_ : ?out:mat -> mat -> unit
val floor_ : ?out:mat -> mat -> unit
val ceil_ : ?out:mat -> mat -> unit
val round_ : ?out:mat -> mat -> unit
val trunc_ : ?out:mat -> mat -> unit
val fix_ : ?out:mat -> mat -> unit
val erf_ : ?out:mat -> mat -> unit
val erfc_ : ?out:mat -> mat -> unit
val relu_ : ?out:mat -> mat -> unit
val softplus_ : ?out:mat -> mat -> unit
val softsign_ : ?out:mat -> mat -> unit
val sigmoid_ : ?out:mat -> mat -> unit
val softmax_ : ?out:mat -> ?axis:int -> mat -> unit
val cumsum_ : ?out:mat -> ?axis:int -> mat -> unit
val cumprod_ : ?out:mat -> ?axis:int -> mat -> unit
val cummin_ : ?out:mat -> ?axis:int -> mat -> unit
val cummax_ : ?out:mat -> ?axis:int -> mat -> unit
val dropout_ : ?out:mat -> ?rate:float -> mat -> unit
val elt_equal_ : ?out:mat -> mat -> mat -> unit
val elt_not_equal_ : ?out:mat -> mat -> mat -> unit
val elt_less_ : ?out:mat -> mat -> mat -> unit
val elt_greater_ : ?out:mat -> mat -> mat -> unit
val elt_less_equal_ : ?out:mat -> mat -> mat -> unit
val elt_greater_equal_ : ?out:mat -> mat -> mat -> unit
val elt_equal_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_not_equal_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_less_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_greater_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_less_equal_scalar_ : ?out:mat -> mat -> elt -> unit
val elt_greater_equal_scalar_ : ?out:mat -> mat -> elt -> unit
\ No newline at end of file diff --git a/owl/Owl_dense_matrix_intf/module-type-Complex/index.html b/owl/Owl_dense_matrix_intf/module-type-Complex/index.html deleted file mode 100644 index cffec2ee9..000000000 --- a/owl/Owl_dense_matrix_intf/module-type-Complex/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Complex (owl.Owl_dense_matrix_intf.Complex)

Module type Owl_dense_matrix_intf.Complex

type mat
type cast_mat
Specific complex functions
val complex : cast_mat -> cast_mat -> mat
val polar : cast_mat -> cast_mat -> mat
val re : mat -> cast_mat
val im : mat -> cast_mat
\ No newline at end of file diff --git a/owl/Owl_dense_matrix_intf/module-type-Real/index.html b/owl/Owl_dense_matrix_intf/module-type-Real/index.html deleted file mode 100644 index b6597c0ae..000000000 --- a/owl/Owl_dense_matrix_intf/module-type-Real/index.html +++ /dev/null @@ -1,12 +0,0 @@ - -Real (owl.Owl_dense_matrix_intf.Real)

Module type Owl_dense_matrix_intf.Real

type elt
type mat
Specific real functions
val i0 : mat -> mat
val i0e : mat -> mat
val i1 : mat -> mat
val i1e : mat -> mat
val iv : v:mat -> mat -> mat
val scalar_iv : v:elt -> mat -> mat
val iv_scalar : v:mat -> elt -> mat
val j0 : mat -> mat
val j1 : mat -> mat
val jv : v:mat -> mat -> mat
val scalar_jv : v:elt -> mat -> mat
val jv_scalar : v:mat -> elt -> mat
val semidef : int -> mat
val min_rows : mat -> (elt * int * int) array
val min_cols : mat -> (elt * int * int) array
val max_rows : mat -> (elt * int * int) array
val max_cols : mat -> (elt * int * int) array
val signum : mat -> mat
val erf : mat -> mat
val erfc : mat -> mat
val logistic : mat -> mat
val relu : mat -> mat
val elu : ?alpha:elt -> mat -> mat
val leaky_relu : ?alpha:elt -> mat -> mat
val softplus : mat -> mat
val softsign : mat -> mat
val softmax : ?axis:int -> mat -> mat
val sigmoid : mat -> mat
val log_sum_exp' : mat -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> mat -> mat
val max_pool : ?padding:Owl_types.padding -> mat -> int array -> int array -> mat
val avg_pool : ?padding:Owl_types.padding -> mat -> int array -> int array -> mat
val atan2 : mat -> mat -> mat
val scalar_atan2 : elt -> mat -> mat
val atan2_scalar : mat -> elt -> mat
val hypot : mat -> mat -> mat
val fmod : mat -> mat -> mat
val fmod_scalar : mat -> elt -> mat
val scalar_fmod : elt -> mat -> mat
val cross_entropy' : mat -> mat -> elt
val clip_by_l2norm : elt -> mat -> mat
val poisson : mu:elt -> int -> int -> mat
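
The Real interface collects the functions that only make sense for real-valued matrices (activations, Bessel functions, atan2, pooling, and so on). A sketch using the double-precision implementation, under the assumption that it is reachable as Owl.Dense.Matrix.D.

module D = Owl.Dense.Matrix.D   (* assumed alias; implements both Common and Real *)

let () =
  let x = D.gaussian 3 3 in
  D.print (D.relu x);             (* negative entries clipped to zero *)
  D.print (D.sigmoid x);
  D.print (D.softmax ~axis:1 x)   (* softmax along the second axis *)
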
\ No newline at end of file diff --git a/owl/Owl_dense_matrix_s/.dummy b/owl/Owl_dense_matrix_s/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_dense_matrix_z/.dummy b/owl/Owl_dense_matrix_z/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_dense_ndarray/.dummy b/owl/Owl_dense_ndarray/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_dense_ndarray/Any/index.html b/owl/Owl_dense_ndarray/Any/index.html deleted file mode 100644 index 56c5db81e..000000000 --- a/owl/Owl_dense_ndarray/Any/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Any (owl.Owl_dense_ndarray.Any)

Module Owl_dense_ndarray.Any

include module type of struct include Owl_dense_ndarray_a end
type 'a arr = 'a Owl_dense_ndarray_a.arr = {
  mutable shape : int array;
  mutable stride : int array;
  mutable data : 'a array;
}
Create N-dimensional array
val create : int array -> 'a -> 'a arr
val init : int array -> (int -> 'a) -> 'a arr
val init_nd : int array -> (int array -> 'a) -> 'a arr
val sequential : ?a:float -> ?step:float -> int array -> float arr
val zeros : int array -> float arr
val ones : int array -> float arr
Obtain basic properties
val shape : 'a arr -> int array
val num_dims : 'a arr -> int
val nth_dim : 'a arr -> int -> int
val numel : 'a arr -> int
val same_shape : 'a arr -> 'a arr -> bool
val strides : 'a arr -> int array
val slice_size : 'a arr -> int array
val index_1d_nd : int -> int array -> int array
val index_nd_1d : int array -> int array -> int
Manipulate a N-dimensional array
val get : 'a arr -> int array -> 'a
val set : 'a arr -> int array -> 'a -> unit
val get_index : 'a arr -> int array array -> 'a array
val set_index : 'a arr -> int array array -> 'a array -> unit
val get_fancy : Owl_types.index list -> 'a arr -> 'a arr
val set_fancy : Owl_types.index list -> 'a arr -> 'a arr -> unit
val get_slice : int list list -> 'a arr -> 'a arr
val set_slice : int list list -> 'a arr -> 'a arr -> unit
val fill : 'a arr -> 'a -> unit
val copy_ : out:'a arr -> 'a arr -> unit
val copy : 'a arr -> 'a arr
val reshape : 'a arr -> int array -> 'a arr
val flatten : 'a arr -> 'a arr
val sub_left : 'a arr -> int array -> 'a arr
val squeeze : ?axis:int array -> 'a arr -> 'a arr
val expand : ?hi:bool -> 'a arr -> int -> 'a arr
val reverse : 'a arr -> 'a arr
val transpose : ?axis:int array -> 'a arr -> 'a arr
val swap : int -> int -> 'a arr -> 'a arr
val repeat : 'a arr -> int array -> 'a arr
val tile : 'a arr -> int array -> 'a arr
val concatenate : ?axis:int -> 'a arr array -> 'a arr
val pad : 'a -> int list list -> 'a arr -> 'a arr
Iterate array elements
val iter : ('a -> unit) -> 'a arr -> unit
val iteri : (int -> 'a -> unit) -> 'a arr -> unit
val map : ('a -> 'b) -> 'a arr -> 'b arr
val mapi : (int -> 'a -> 'b) -> 'a arr -> 'b arr
val filter : ('a -> bool) -> 'a arr -> int array
val filteri : (int -> 'a -> bool) -> 'a arr -> int array
val fold : ('a -> 'b -> 'a) -> 'a -> 'b arr -> 'a
val foldi : (int -> 'a -> 'b -> 'a) -> 'a -> 'b arr -> 'a
val iter2 : ('a -> 'b -> unit) -> 'a arr -> 'b arr -> unit
val iter2i : (int -> 'a -> 'b -> unit) -> 'a arr -> 'b arr -> unit
val map2 : ('a -> 'b -> 'c) -> 'a arr -> 'b arr -> 'c arr
val map2i : (int -> 'a -> 'b -> 'c) -> 'a arr -> 'b arr -> 'c arr
Examine array elements or compare two arrays
val exists : ('a -> bool) -> 'a arr -> bool
val not_exists : ('a -> bool) -> 'a arr -> bool
val for_all : ('a -> bool) -> 'a arr -> bool
val is_equal : ?cmp:('a -> 'a -> int) -> 'a arr -> 'a arr -> bool
val not_equal : ?cmp:('a -> 'a -> int) -> 'a arr -> 'a arr -> bool
val greater : ?cmp:('a -> 'a -> int) -> 'a arr -> 'a arr -> bool
val less : ?cmp:('a -> 'a -> int) -> 'a arr -> 'a arr -> bool
val greater_equal : ?cmp:('a -> 'a -> int) -> 'a arr -> 'a arr -> bool
val less_equal : ?cmp:('a -> 'a -> int) -> 'a arr -> 'a arr -> bool
val elt_equal : ?cmp:('a -> 'a -> int) -> 'a arr -> 'a arr -> bool arr
val elt_not_equal : ?cmp:('a -> 'a -> int) -> 'a arr -> 'a arr -> bool arr
val elt_greater : ?cmp:('a -> 'a -> int) -> 'a arr -> 'a arr -> bool arr
val elt_less : ?cmp:('a -> 'a -> int) -> 'a arr -> 'a arr -> bool arr
val elt_greater_equal : ?cmp:('a -> 'a -> int) -> 'a arr -> 'a arr -> bool arr
val elt_less_equal : ?cmp:('a -> 'a -> int) -> 'a arr -> 'a arr -> bool arr
val elt_equal_scalar : ?cmp:('a -> 'a -> int) -> 'a arr -> 'a -> bool arr
val elt_not_equal_scalar : ?cmp:('a -> 'a -> int) -> 'a arr -> 'a -> bool arr
val elt_greater_scalar : ?cmp:('a -> 'a -> int) -> 'a arr -> 'a -> bool arr
val elt_less_scalar : ?cmp:('a -> 'a -> int) -> 'a arr -> 'a -> bool arr
val elt_greater_equal_scalar : ?cmp:('a -> 'a -> int) -> 'a arr -> 'a -> bool arr
val elt_less_equal_scalar : ?cmp:('a -> 'a -> int) -> 'a arr -> 'a -> bool arr
val sort : ?cmp:('a -> 'a -> int) -> 'a arr -> unit
val min : ?cmp:('a -> 'a -> int) -> 'a arr -> 'a
val max : ?cmp:('a -> 'a -> int) -> 'a arr -> 'a
val min_i : ?cmp:('a -> 'a -> int) -> 'a arr -> 'a * int
val max_i : ?cmp:('a -> 'a -> int) -> 'a arr -> 'a * int
Input/Output functions
val of_array : 'a array -> int array -> 'a arr
val to_array : 'a arr -> 'a array
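
Unlike the Bigarray-backed modules, Any stores its elements in a plain 'a array, so it also works for non-numeric element types. A sketch assuming the module is reachable as Owl.Dense.Ndarray.Any.

module A = Owl.Dense.Ndarray.Any   (* assumed alias for this module *)

let () =
  let x = A.init [| 2; 3 |] (fun i -> Printf.sprintf "cell-%d" i) in
  let y = A.map String.uppercase_ascii x in             (* map may change the element type *)
  A.iteri (fun i s -> Printf.printf "%d: %s\n" i s) y;
  Printf.printf "numel = %d\n" (A.numel y)
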
\ No newline at end of file diff --git a/owl/Owl_dense_ndarray/C/index.html b/owl/Owl_dense_ndarray/C/index.html deleted file mode 100644 index 17ddd063d..000000000 --- a/owl/Owl_dense_ndarray/C/index.html +++ /dev/null @@ -1,582 +0,0 @@ - -C (owl.Owl_dense_ndarray.C)

Module Owl_dense_ndarray.C

include module type of struct include Owl_dense_ndarray_c end
type elt = Stdlib.Complex.t
type arr = (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
type cast_arr = (float, Stdlib.Bigarray.float32_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
include Owl_dense_ndarray_intf.Common with type elt := elt and type arr := arr
include Owl_base_dense_ndarray_intf.Common with type elt := elt with type arr := arr
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:float -> int array -> arr
val shape : arr -> int array
val numel : arr -> int
val strides : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val slice_size : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val flatten : arr -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val squeeze : ?axis:int array -> arr -> arr
val expand : ?hi:bool -> arr -> int -> arr
val split : ?axis:int -> int array -> arr -> arr array
val draw : ?axis:int -> arr -> int -> arr * int array
val pad : ?v:elt -> int list list -> arr -> arr
val one_hot : int -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
Iterate array elements
val iteri : (int -> elt -> unit) -> arr -> unit
val iter : (elt -> unit) -> arr -> unit
val mapi : (int -> elt -> elt) -> arr -> arr
val map : (elt -> elt) -> arr -> arr
val filteri : (int -> elt -> bool) -> arr -> int array
val filter : (elt -> bool) -> arr -> int array
val foldi : ?axis:int -> (int -> elt -> elt -> elt) -> elt -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scani : ?axis:int -> (int -> elt -> elt -> elt) -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
Examination & Comparison
val exists : (elt -> bool) -> arr -> bool
val not_exists : (elt -> bool) -> arr -> bool
val for_all : (elt -> bool) -> arr -> bool
val is_zero : arr -> bool
val is_positive : arr -> bool
val is_negative : arr -> bool
val is_nonpositive : arr -> bool
val is_nonnegative : arr -> bool
val is_normal : arr -> bool
val not_nan : arr -> bool
val not_inf : arr -> bool
val equal : arr -> arr -> bool
val not_equal : arr -> arr -> bool
val greater : arr -> arr -> bool
val less : arr -> arr -> bool
val greater_equal : arr -> arr -> bool
val less_equal : arr -> arr -> bool
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val equal_scalar : arr -> elt -> bool
val not_equal_scalar : arr -> elt -> bool
val less_scalar : arr -> elt -> bool
val greater_scalar : arr -> elt -> bool
val less_equal_scalar : arr -> elt -> bool
val greater_equal_scalar : arr -> elt -> bool
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val diag : ?k:int -> arr -> arr
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
Create N-dimensional array
val linspace : elt -> elt -> int -> arr

linspace k 0. 9. 10 ...

val logspace : ?base:float -> elt -> elt -> int -> arr

logspace k 0. 9. 10 ...

val unit_basis : int -> int -> arr

unit_basis n i returns the unit basis vector of length n with the ith element set to 1.

Obtain basic properties
val num_dims : arr -> int
val nth_dim : arr -> int -> int
val nnz : arr -> int
val density : arr -> float
val size_in_bytes : arr -> int
val same_shape : arr -> arr -> bool
val same_data : arr -> arr -> bool
val ind : arr -> int -> int array
val i1d : arr -> int array -> int
Manipulate a N-dimensional array
val get_index : arr -> int array array -> elt array
val set_index : arr -> int array array -> elt array -> unit
val get_fancy : Owl_types.index list -> arr -> arr
val set_fancy : Owl_types.index list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val sub_ndarray : int array -> arr -> arr array
val slice_left : arr -> int array -> arr
val fill : arr -> elt -> unit
val resize : ?head:bool -> arr -> int array -> arr
val flip : ?axis:int -> arr -> arr
val rotate : arr -> int -> arr
val swap : int -> int -> arr -> arr
val concat_vertical : arr -> arr -> arr
val concat_horizontal : arr -> arr -> arr
val concat_vh : arr array array -> arr
val split_vh : (int * int) array array -> arr -> arr array array
val dropout : ?rate:float -> arr -> arr
val top : arr -> int -> int array array
val bottom : arr -> int -> int array array
val sort : arr -> arr
val sort1 : ?axis:int -> arr -> arr
val argsort : arr -> (int64, Stdlib.Bigarray.int64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
val mmap : Unix.file_descr -> ?pos:int64 -> bool -> int array -> arr
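
A hedged sketch of the N-dimensional manipulation functions above on this module's single-precision complex arrays, assuming the alias Owl.Dense.Ndarray.C; the slice specification is the same int list list convention used by get_slice.

module N = Owl.Dense.Ndarray.C   (* assumed alias; elt is Stdlib.Complex.t *)

let () =
  let x = N.sequential [| 2; 3; 4 |] in
  let s = N.get_slice [ []; [ 1 ]; [ 0; 2 ] ] x in   (* all of axis 0, index 1 of axis 1, 0..2 of axis 2 *)
  let r = N.reshape x [| 4; 6 |] in
  let f = N.flip ~axis:0 x in
  N.print s; N.print r; N.print f
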
Iterate array elements
val iter2i : (int -> elt -> elt -> unit) -> arr -> arr -> unit
val iter2 : (elt -> elt -> unit) -> arr -> arr -> unit
val map2i : (int -> elt -> elt -> elt) -> arr -> arr -> arr
val map2 : (elt -> elt -> elt) -> arr -> arr -> arr
val iteri_nd : (int array -> elt -> unit) -> arr -> unit
val mapi_nd : (int array -> elt -> elt) -> arr -> arr
val foldi_nd : ?axis:int -> (int array -> elt -> elt -> elt) -> elt -> arr -> arr
val scani_nd : ?axis:int -> (int array -> elt -> elt -> elt) -> arr -> arr
val filteri_nd : (int array -> elt -> bool) -> arr -> int array array
val iter2i_nd : (int array -> elt -> elt -> unit) -> arr -> arr -> unit
val map2i_nd : (int array -> elt -> elt -> elt) -> arr -> arr -> arr
val iteri_slice : ?axis:int -> (int -> arr -> unit) -> arr -> unit
val iter_slice : ?axis:int -> (arr -> unit) -> arr -> unit
val mapi_slice : ?axis:int -> (int -> arr -> 'c) -> arr -> 'c array
val map_slice : ?axis:int -> (arr -> 'c) -> arr -> 'c array
val filteri_slice : ?axis:int -> (int -> arr -> bool) -> arr -> arr array
val filter_slice : ?axis:int -> (arr -> bool) -> arr -> arr array
val foldi_slice : ?axis:int -> (int -> 'c -> arr -> 'c) -> 'c -> arr -> 'c
val fold_slice : ?axis:int -> ('c -> arr -> 'c) -> 'c -> arr -> 'c
Examine array elements or compare two arrays
val approx_equal : ?eps:float -> arr -> arr -> bool
val approx_equal_scalar : ?eps:float -> arr -> elt -> bool
val approx_elt_equal : ?eps:float -> arr -> arr -> arr
val approx_elt_equal_scalar : ?eps:float -> arr -> elt -> arr
Input/Output functions
val to_array : arr -> elt array
val save : out:string -> arr -> unit
val load : string -> arr
val save_npy : out:string -> arr -> unit
val load_npy : string -> arr
Unary mathematical operations
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod' : arr -> elt
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean' : arr -> elt
val median' : arr -> elt
val median : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var' : arr -> elt
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std' : arr -> elt
val sem : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sem' : arr -> elt
val minmax : ?axis:int -> ?keep_dims:bool -> arr -> arr * arr
val minmax' : arr -> elt * elt
val min_i : arr -> elt * int array
val max_i : arr -> elt * int array
val minmax_i : arr -> (elt * int array) * (elt * int array)
val abs2 : arr -> arr
val conj : arr -> arr
val reci : arr -> arr
val reci_tol : ?tol:elt -> arr -> arr
val cbrt : arr -> arr
val exp2 : arr -> arr
val exp10 : arr -> arr
val expm1 : arr -> arr
val log1p : arr -> arr
val trunc : arr -> arr
val fix : arr -> arr
val modf : arr -> arr * arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm' : arr -> elt
val l2norm_sqr : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm_sqr' : arr -> elt
val vecnorm : ?axis:int -> ?p:float -> ?keep_dims:bool -> arr -> arr
val vecnorm' : ?p:float -> arr -> elt
val cumsum : ?axis:int -> arr -> arr
val cumprod : ?axis:int -> arr -> arr
val cummin : ?axis:int -> arr -> arr
val cummax : ?axis:int -> arr -> arr
val diff : ?axis:int -> ?n:int -> arr -> arr
val lgamma : arr -> arr
Binary mathematical operations
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val ssqr' : arr -> elt -> elt
val ssqr_diff' : arr -> arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
Tensor Calculus
val contract1 : (int * int) array -> arr -> arr
val contract2 : (int * int) array -> arr -> arr -> arr
Experimental functions
val sum_slices : ?axis:int -> arr -> arr
val slide : ?axis:int -> ?ofs:int -> ?step:int -> window:int -> arr -> arr
Functions of in-place modification
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:float -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val sort_ : arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_fancy_ : out:arr -> Owl_types.index list -> arr -> unit
val set_fancy_ : out:arr -> Owl_types.index list -> arr -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit

Matrix functions

val col : arr -> int -> arr
val cols : arr -> int array -> arr
val dot : arr -> arr -> arr
val trace : arr -> elt
val to_arrays : arr -> elt array array
val draw_rows : ?replacement:bool -> arr -> int -> arr * int array
val draw_cols : ?replacement:bool -> arr -> int -> arr * int array
val draw_rows2 : ?replacement:bool -> arr -> arr -> int -> arr * arr * int array
val draw_cols2 : ?replacement:bool -> arr -> arr -> int -> arr * arr * int array
include Owl_dense_ndarray_intf.NN with type arr := arr
include Owl_base_dense_ndarray_intf.NN with type arr := arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val max_pool2d_argmax : ?padding:Owl_types.padding -> arr -> int array -> int array -> arr * (int64, Stdlib.Bigarray.int64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
val conv1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
include Owl_dense_ndarray_intf.Complex with type elt := elt and type arr := arr and type cast_arr := cast_arr
Complex operations
val complex : cast_arr -> cast_arr -> arr

complex re im constructs a complex ndarray/matrix x from re and im, which supply the real and imaginary parts of x respectively.

Note that both re and im can be complex but must have the same type. The real part of re becomes the real part of x, and the imaginary part of im becomes the imaginary part of x.
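
As a rough illustration of the intended usage, here is a minimal sketch; the module paths Owl_dense_ndarray.S and Owl_dense_ndarray.C below are assumptions for this single-precision complex module and are not part of the signature above:

let () =
  (* real and imaginary parts as single-precision float ndarrays *)
  let re = Owl_dense_ndarray.S.sequential [|2; 2|] in
  let im = Owl_dense_ndarray.S.ones [|2; 2|] in
  (* assemble the complex ndarray x from the two parts *)
  let x = Owl_dense_ndarray.C.complex re im in
  Owl_dense_ndarray.C.print x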

val polar : cast_arr -> cast_arr -> arr

polar rho theta constructs a complex ndarray/matrix from the polar coordinates rho and theta: rho contains the magnitudes and theta the phase angles. Note that the behaviour is undefined if rho has negative elements or theta has infinite elements.
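
A similar sketch for polar, under the same assumptions about module paths:

let () =
  let rho   = Owl_dense_ndarray.S.create [|3|] 1.0 in        (* magnitudes *)
  let theta = Owl_dense_ndarray.S.linspace 0. Float.pi 3 in  (* phase angles *)
  (* each element of z has the corresponding magnitude from rho and phase from theta *)
  let z = Owl_dense_ndarray.C.polar rho theta in
  Owl_dense_ndarray.C.print z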

val re : arr -> cast_arr
val im : arr -> cast_arr
val sum' : arr -> elt
include module type of struct include Operator end
include sig ... end
val (+$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (-$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (*$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (/$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($+) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($-) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($*) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($/) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val (=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (!=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (<>) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (>) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (<) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (>=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (<=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
include sig ... end
val (=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (!=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (<>$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (<$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (>$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (<=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (>=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (!=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (<>.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (<.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (>.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (<=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (>=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (=~) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (=~$) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (=~.) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val (=~.$) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (%) : (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t
val (%$) : (float, 'a) Owl_dense_ndarray_generic.t -> float -> (float, 'a) Owl_dense_ndarray_generic.t
val (**) : (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t
val ($**) : float -> (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t
val (**$) : (float, 'a) Owl_dense_ndarray_generic.t -> float -> (float, 'a) Owl_dense_ndarray_generic.t
val (+=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (-=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (*=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (/=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (+$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (-$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (*$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (/$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (.!{;..}) : ('a, 'b) Owl_dense_ndarray_generic.t -> Owl_types.index array -> ('a, 'b) Owl_dense_ndarray_generic.t
val (.!{;..}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> Owl_types.index array -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (.${}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list -> ('a, 'b) Owl_dense_ndarray_generic.t
val (.${;..}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list array -> ('a, 'b) Owl_dense_ndarray_generic.t
val (.${}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (.${;..}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list array -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
include sig ... end
val (.%{}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int -> 'a
val (.%{;..}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int array -> 'a
val (.%{}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int -> 'a -> unit
val (.%{;..}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int array -> 'a -> unit
val mpow : Owl_linalg_c.mat -> float -> Owl_linalg_c.mat
\ No newline at end of file diff --git a/owl/Owl_dense_ndarray/D/index.html b/owl/Owl_dense_ndarray/D/index.html deleted file mode 100644 index db0d72a5a..000000000 --- a/owl/Owl_dense_ndarray/D/index.html +++ /dev/null @@ -1,579 +0,0 @@ - -D (owl.Owl_dense_ndarray.D)

Module Owl_dense_ndarray.D

include module type of struct include Owl_dense_ndarray_d end
type elt = float
type arr = (float, Stdlib.Bigarray.float64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
include Owl_dense_ndarray_intf.Common with type elt := elt and type arr := arr
include Owl_base_dense_ndarray_intf.Common with type elt := elt with type arr := arr
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:float -> int array -> arr
val shape : arr -> int array
val numel : arr -> int
val strides : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val slice_size : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val flatten : arr -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val squeeze : ?axis:int array -> arr -> arr
val expand : ?hi:bool -> arr -> int -> arr
val split : ?axis:int -> int array -> arr -> arr array
val draw : ?axis:int -> arr -> int -> arr * int array
val pad : ?v:elt -> int list list -> arr -> arr
val one_hot : int -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
Iterate array elements
val iteri : (int -> elt -> unit) -> arr -> unit
val iter : (elt -> unit) -> arr -> unit
val mapi : (int -> elt -> elt) -> arr -> arr
val map : (elt -> elt) -> arr -> arr
val filteri : (int -> elt -> bool) -> arr -> int array
val filter : (elt -> bool) -> arr -> int array
val foldi : ?axis:int -> (int -> elt -> elt -> elt) -> elt -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scani : ?axis:int -> (int -> elt -> elt -> elt) -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
Examination & Comparison
val exists : (elt -> bool) -> arr -> bool
val not_exists : (elt -> bool) -> arr -> bool
val for_all : (elt -> bool) -> arr -> bool
val is_zero : arr -> bool
val is_positive : arr -> bool
val is_negative : arr -> bool
val is_nonpositive : arr -> bool
val is_nonnegative : arr -> bool
val is_normal : arr -> bool
val not_nan : arr -> bool
val not_inf : arr -> bool
val equal : arr -> arr -> bool
val not_equal : arr -> arr -> bool
val greater : arr -> arr -> bool
val less : arr -> arr -> bool
val greater_equal : arr -> arr -> bool
val less_equal : arr -> arr -> bool
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val equal_scalar : arr -> elt -> bool
val not_equal_scalar : arr -> elt -> bool
val less_scalar : arr -> elt -> bool
val greater_scalar : arr -> elt -> bool
val less_equal_scalar : arr -> elt -> bool
val greater_equal_scalar : arr -> elt -> bool
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val diag : ?k:int -> arr -> arr
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
Create N-dimensional array
val linspace : elt -> elt -> int -> arr

linspace 0. 9. 10 generates an array of 10 evenly spaced values from 0. to 9.

val logspace : ?base:float -> elt -> elt -> int -> arr

logspace 0. 9. 10 generates 10 values evenly spaced on a logarithmic scale between base**0. and base**9., where base is set by the optional ?base argument.
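
For example, a minimal sketch assuming the Owl_dense_ndarray.D module path and the logspace semantics described above:

let () =
  (* 10 evenly spaced values from 0. to 9. inclusive *)
  let xs = Owl_dense_ndarray.D.linspace 0. 9. 10 in
  (* 4 values on a log scale: base**0., base**1., base**2., base**3. *)
  let ys = Owl_dense_ndarray.D.logspace ~base:10. 0. 3. 4 in
  Owl_dense_ndarray.D.print xs;
  Owl_dense_ndarray.D.print ys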

val unit_basis : int -> int -> arr

unit_basis n i returns a unit basis vector of length n with the ith element set to 1.
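
For instance, under the same assumed module path:

let () =
  (* a length-5 basis vector whose element at index 2 is 1., the rest 0. *)
  let e2 = Owl_dense_ndarray.D.unit_basis 5 2 in
  Owl_dense_ndarray.D.print e2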

Obtain basic properties
val num_dims : arr -> int
val nth_dim : arr -> int -> int
val nnz : arr -> int
val density : arr -> float
val size_in_bytes : arr -> int
val same_shape : arr -> arr -> bool
val same_data : arr -> arr -> bool
val ind : arr -> int -> int array
val i1d : arr -> int array -> int
Manipulate a N-dimensional array
val get_index : arr -> int array array -> elt array
val set_index : arr -> int array array -> elt array -> unit
val get_fancy : Owl_types.index list -> arr -> arr
val set_fancy : Owl_types.index list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val sub_ndarray : int array -> arr -> arr array
val slice_left : arr -> int array -> arr
val fill : arr -> elt -> unit
val resize : ?head:bool -> arr -> int array -> arr
val flip : ?axis:int -> arr -> arr
val rotate : arr -> int -> arr
val swap : int -> int -> arr -> arr
val concat_vertical : arr -> arr -> arr
val concat_horizontal : arr -> arr -> arr
val concat_vh : arr array array -> arr
val split_vh : (int * int) array array -> arr -> arr array array
val dropout : ?rate:float -> arr -> arr
val top : arr -> int -> int array array
val bottom : arr -> int -> int array array
val sort : arr -> arr
val sort1 : ?axis:int -> arr -> arr
val argsort : arr -> (int64, Stdlib.Bigarray.int64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
val mmap : Unix.file_descr -> ?pos:int64 -> bool -> int array -> arr
Iterate array elements
val iter2i : (int -> elt -> elt -> unit) -> arr -> arr -> unit
val iter2 : (elt -> elt -> unit) -> arr -> arr -> unit
val map2i : (int -> elt -> elt -> elt) -> arr -> arr -> arr
val map2 : (elt -> elt -> elt) -> arr -> arr -> arr
val iteri_nd : (int array -> elt -> unit) -> arr -> unit
val mapi_nd : (int array -> elt -> elt) -> arr -> arr
val foldi_nd : ?axis:int -> (int array -> elt -> elt -> elt) -> elt -> arr -> arr
val scani_nd : ?axis:int -> (int array -> elt -> elt -> elt) -> arr -> arr
val filteri_nd : (int array -> elt -> bool) -> arr -> int array array
val iter2i_nd : (int array -> elt -> elt -> unit) -> arr -> arr -> unit
val map2i_nd : (int array -> elt -> elt -> elt) -> arr -> arr -> arr
val iteri_slice : ?axis:int -> (int -> arr -> unit) -> arr -> unit
val iter_slice : ?axis:int -> (arr -> unit) -> arr -> unit
val mapi_slice : ?axis:int -> (int -> arr -> 'c) -> arr -> 'c array
val map_slice : ?axis:int -> (arr -> 'c) -> arr -> 'c array
val filteri_slice : ?axis:int -> (int -> arr -> bool) -> arr -> arr array
val filter_slice : ?axis:int -> (arr -> bool) -> arr -> arr array
val foldi_slice : ?axis:int -> (int -> 'c -> arr -> 'c) -> 'c -> arr -> 'c
val fold_slice : ?axis:int -> ('c -> arr -> 'c) -> 'c -> arr -> 'c
Examine array elements or compare two arrays
Input/Output functions
val to_array : arr -> elt array
val save : out:string -> arr -> unit
val load : string -> arr
val save_npy : out:string -> arr -> unit
val load_npy : string -> arr
Unary mathematical operations
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod' : arr -> elt
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean' : arr -> elt
val median' : arr -> elt
val median : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var' : arr -> elt
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std' : arr -> elt
val sem : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sem' : arr -> elt
val minmax : ?axis:int -> ?keep_dims:bool -> arr -> arr * arr
val minmax' : arr -> elt * elt
val min_i : arr -> elt * int array
val max_i : arr -> elt * int array
val minmax_i : arr -> (elt * int array) * (elt * int array)
val abs2 : arr -> arr
val conj : arr -> arr
val reci : arr -> arr
val reci_tol : ?tol:elt -> arr -> arr
val cbrt : arr -> arr
val exp2 : arr -> arr
val exp10 : arr -> arr
val expm1 : arr -> arr
val log1p : arr -> arr
val trunc : arr -> arr
val fix : arr -> arr
val modf : arr -> arr * arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm_sqr : ?axis:int -> ?keep_dims:bool -> arr -> arr
val vecnorm : ?axis:int -> ?p:float -> ?keep_dims:bool -> arr -> arr
val vecnorm' : ?p:float -> arr -> elt
val cumsum : ?axis:int -> arr -> arr
val cumprod : ?axis:int -> arr -> arr
val cummin : ?axis:int -> arr -> arr
val cummax : ?axis:int -> arr -> arr
val diff : ?axis:int -> ?n:int -> arr -> arr
val lgamma : arr -> arr
Binary mathematical operations
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val ssqr' : arr -> elt -> elt
val ssqr_diff' : arr -> arr -> elt
Tensor Calculus
val contract1 : (int * int) array -> arr -> arr
val contract2 : (int * int) array -> arr -> arr -> arr
Experimental functions
val slide : ?axis:int -> ?ofs:int -> ?step:int -> window:int -> arr -> arr
Functions of in-place modification
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:float -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val sort_ : arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_fancy_ : out:arr -> Owl_types.index list -> arr -> unit
val set_fancy_ : out:arr -> Owl_types.index list -> arr -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit

Matrix functions

val col : arr -> int -> arr
val cols : arr -> int array -> arr
val to_arrays : arr -> elt array array
val draw_rows : ?replacement:bool -> arr -> int -> arr * int array
val draw_cols : ?replacement:bool -> arr -> int -> arr * int array
val draw_rows2 : ?replacement:bool -> arr -> arr -> int -> arr * arr * int array
val draw_cols2 : ?replacement:bool -> arr -> arr -> int -> arr * arr * int array
include Owl_dense_ndarray_intf.Real with type elt := elt and type arr := arr
include Owl_base_dense_ndarray_intf.Real with type elt := elt with type arr := arr
val sum_slices : ?axis:int -> arr -> arr
val signum : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val atan2 : arr -> arr -> arr
val approx_equal : ?eps:float -> arr -> arr -> bool
val approx_equal_scalar : ?eps:float -> arr -> float -> bool
val approx_elt_equal : ?eps:float -> arr -> arr -> arr
val approx_elt_equal_scalar : ?eps:float -> arr -> float -> arr
val dot : arr -> arr -> arr
val trace : arr -> elt
Helper functions
val float_to_elt : float -> elt
val elt_to_float : elt -> float
Real operations
val i0 : arr -> arr
val i0e : arr -> arr
val i1 : arr -> arr
val i1e : arr -> arr
val iv : v:arr -> arr -> arr
val scalar_iv : v:elt -> arr -> arr
val iv_scalar : v:arr -> elt -> arr
val j0 : arr -> arr
val j1 : arr -> arr
val jv : v:arr -> arr -> arr
val scalar_jv : v:elt -> arr -> arr
val jv_scalar : v:arr -> elt -> arr
val erf : arr -> arr
val erfc : arr -> arr
val logistic : arr -> arr
val elu : ?alpha:elt -> arr -> arr
val leaky_relu : ?alpha:elt -> arr -> arr
val softplus : arr -> arr
val softsign : arr -> arr
val softmax : ?axis:int -> arr -> arr
val sigmoid : arr -> arr
val log_sum_exp' : arr -> float
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val fmod_scalar : arr -> elt -> arr
val scalar_fmod : elt -> arr -> arr
val cross_entropy' : arr -> arr -> float
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
val poisson : mu:elt -> int array -> arr
val poisson_ : mu:elt -> out:arr -> unit
include Owl_dense_ndarray_intf.NN with type arr := arr
include Owl_base_dense_ndarray_intf.NN with type arr := arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val max_pool2d_argmax : ?padding:Owl_types.padding -> arr -> int array -> int array -> arr * (int64, Stdlib.Bigarray.int64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
val conv1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
include Owl_dense_ndarray_intf.Distribution with type arr := arr
Stats & distribution functions
val uniform_rvs : a:arr -> b:arr -> n:int -> arr
val uniform_pdf : a:arr -> b:arr -> arr -> arr
val uniform_logpdf : a:arr -> b:arr -> arr -> arr
val uniform_cdf : a:arr -> b:arr -> arr -> arr
val uniform_logcdf : a:arr -> b:arr -> arr -> arr
val uniform_ppf : a:arr -> b:arr -> arr -> arr
val uniform_sf : a:arr -> b:arr -> arr -> arr
val uniform_logsf : a:arr -> b:arr -> arr -> arr
val uniform_isf : a:arr -> b:arr -> arr -> arr
val gaussian_rvs : mu:arr -> sigma:arr -> n:int -> arr
val gaussian_pdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_logpdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_cdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_logcdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_ppf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_sf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_logsf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_isf : mu:arr -> sigma:arr -> arr -> arr
val exponential_rvs : lambda:arr -> n:int -> arr
val exponential_pdf : lambda:arr -> arr -> arr
val exponential_logpdf : lambda:arr -> arr -> arr
val exponential_cdf : lambda:arr -> arr -> arr
val exponential_logcdf : lambda:arr -> arr -> arr
val exponential_ppf : lambda:arr -> arr -> arr
val exponential_sf : lambda:arr -> arr -> arr
val exponential_logsf : lambda:arr -> arr -> arr
val exponential_isf : lambda:arr -> arr -> arr
val gamma_rvs : shape:arr -> scale:arr -> n:int -> arr
val gamma_pdf : shape:arr -> scale:arr -> arr -> arr
val gamma_logpdf : shape:arr -> scale:arr -> arr -> arr
val gamma_cdf : shape:arr -> scale:arr -> arr -> arr
val gamma_logcdf : shape:arr -> scale:arr -> arr -> arr
val gamma_ppf : shape:arr -> scale:arr -> arr -> arr
val gamma_sf : shape:arr -> scale:arr -> arr -> arr
val gamma_logsf : shape:arr -> scale:arr -> arr -> arr
val gamma_isf : shape:arr -> scale:arr -> arr -> arr
val beta_rvs : a:arr -> b:arr -> n:int -> arr
val beta_pdf : a:arr -> b:arr -> arr -> arr
val beta_logpdf : a:arr -> b:arr -> arr -> arr
val beta_cdf : a:arr -> b:arr -> arr -> arr
val beta_logcdf : a:arr -> b:arr -> arr -> arr
val beta_ppf : a:arr -> b:arr -> arr -> arr
val beta_sf : a:arr -> b:arr -> arr -> arr
val beta_logsf : a:arr -> b:arr -> arr -> arr
val beta_isf : a:arr -> b:arr -> arr -> arr
val chi2_rvs : df:arr -> n:int -> arr
val chi2_pdf : df:arr -> arr -> arr
val chi2_logpdf : df:arr -> arr -> arr
val chi2_cdf : df:arr -> arr -> arr
val chi2_logcdf : df:arr -> arr -> arr
val chi2_ppf : df:arr -> arr -> arr
val chi2_sf : df:arr -> arr -> arr
val chi2_logsf : df:arr -> arr -> arr
val chi2_isf : df:arr -> arr -> arr
val f_rvs : dfnum:arr -> dfden:arr -> n:int -> arr
val f_pdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_logpdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_cdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_logcdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_ppf : dfnum:arr -> dfden:arr -> arr -> arr
val f_sf : dfnum:arr -> dfden:arr -> arr -> arr
val f_logsf : dfnum:arr -> dfden:arr -> arr -> arr
val f_isf : dfnum:arr -> dfden:arr -> arr -> arr
val cauchy_rvs : loc:arr -> scale:arr -> n:int -> arr
val cauchy_pdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_logpdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_cdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_logcdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_ppf : loc:arr -> scale:arr -> arr -> arr
val cauchy_sf : loc:arr -> scale:arr -> arr -> arr
val cauchy_logsf : loc:arr -> scale:arr -> arr -> arr
val cauchy_isf : loc:arr -> scale:arr -> arr -> arr
val lomax_rvs : shape:arr -> scale:arr -> n:int -> arr
val lomax_pdf : shape:arr -> scale:arr -> arr -> arr
val lomax_logpdf : shape:arr -> scale:arr -> arr -> arr
val lomax_cdf : shape:arr -> scale:arr -> arr -> arr
val lomax_logcdf : shape:arr -> scale:arr -> arr -> arr
val lomax_ppf : shape:arr -> scale:arr -> arr -> arr
val lomax_sf : shape:arr -> scale:arr -> arr -> arr
val lomax_logsf : shape:arr -> scale:arr -> arr -> arr
val lomax_isf : shape:arr -> scale:arr -> arr -> arr
val weibull_rvs : shape:arr -> scale:arr -> n:int -> arr
val weibull_pdf : shape:arr -> scale:arr -> arr -> arr
val weibull_logpdf : shape:arr -> scale:arr -> arr -> arr
val weibull_cdf : shape:arr -> scale:arr -> arr -> arr
val weibull_logcdf : shape:arr -> scale:arr -> arr -> arr
val weibull_ppf : shape:arr -> scale:arr -> arr -> arr
val weibull_sf : shape:arr -> scale:arr -> arr -> arr
val weibull_logsf : shape:arr -> scale:arr -> arr -> arr
val weibull_isf : shape:arr -> scale:arr -> arr -> arr
val laplace_rvs : loc:arr -> scale:arr -> n:int -> arr
val laplace_pdf : loc:arr -> scale:arr -> arr -> arr
val laplace_logpdf : loc:arr -> scale:arr -> arr -> arr
val laplace_cdf : loc:arr -> scale:arr -> arr -> arr
val laplace_logcdf : loc:arr -> scale:arr -> arr -> arr
val laplace_ppf : loc:arr -> scale:arr -> arr -> arr
val laplace_sf : loc:arr -> scale:arr -> arr -> arr
val laplace_logsf : loc:arr -> scale:arr -> arr -> arr
val laplace_isf : loc:arr -> scale:arr -> arr -> arr
val gumbel1_rvs : a:arr -> b:arr -> n:int -> arr
val gumbel1_pdf : a:arr -> b:arr -> arr -> arr
val gumbel1_logpdf : a:arr -> b:arr -> arr -> arr
val gumbel1_cdf : a:arr -> b:arr -> arr -> arr
val gumbel1_logcdf : a:arr -> b:arr -> arr -> arr
val gumbel1_ppf : a:arr -> b:arr -> arr -> arr
val gumbel1_sf : a:arr -> b:arr -> arr -> arr
val gumbel1_logsf : a:arr -> b:arr -> arr -> arr
val gumbel1_isf : a:arr -> b:arr -> arr -> arr
val gumbel2_rvs : a:arr -> b:arr -> n:int -> arr
val gumbel2_pdf : a:arr -> b:arr -> arr -> arr
val gumbel2_logpdf : a:arr -> b:arr -> arr -> arr
val gumbel2_cdf : a:arr -> b:arr -> arr -> arr
val gumbel2_logcdf : a:arr -> b:arr -> arr -> arr
val gumbel2_ppf : a:arr -> b:arr -> arr -> arr
val gumbel2_sf : a:arr -> b:arr -> arr -> arr
val gumbel2_logsf : a:arr -> b:arr -> arr -> arr
val gumbel2_isf : a:arr -> b:arr -> arr -> arr
val logistic_rvs : loc:arr -> scale:arr -> n:int -> arr
val logistic_pdf : loc:arr -> scale:arr -> arr -> arr
val logistic_logpdf : loc:arr -> scale:arr -> arr -> arr
val logistic_cdf : loc:arr -> scale:arr -> arr -> arr
val logistic_logcdf : loc:arr -> scale:arr -> arr -> arr
val logistic_ppf : loc:arr -> scale:arr -> arr -> arr
val logistic_sf : loc:arr -> scale:arr -> arr -> arr
val logistic_logsf : loc:arr -> scale:arr -> arr -> arr
val logistic_isf : loc:arr -> scale:arr -> arr -> arr
val lognormal_rvs : mu:arr -> sigma:arr -> n:int -> arr
val lognormal_pdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_logpdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_cdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_logcdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_ppf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_sf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_logsf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_isf : mu:arr -> sigma:arr -> arr -> arr
val rayleigh_rvs : sigma:arr -> n:int -> arr
val rayleigh_pdf : sigma:arr -> arr -> arr
val rayleigh_logpdf : sigma:arr -> arr -> arr
val rayleigh_cdf : sigma:arr -> arr -> arr
val rayleigh_logcdf : sigma:arr -> arr -> arr
val rayleigh_ppf : sigma:arr -> arr -> arr
val rayleigh_sf : sigma:arr -> arr -> arr
val rayleigh_logsf : sigma:arr -> arr -> arr
val rayleigh_isf : sigma:arr -> arr -> arr
include module type of struct include Operator end
include sig ... end
val (+$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (-$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (*$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (/$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($+) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($-) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($*) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($/) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val (=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (!=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (<>) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (>) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (<) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (>=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (<=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
include sig ... end
val (=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (!=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (<>$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (<$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (>$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (<=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (>=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (!=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (<>.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (<.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (>.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (<=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (>=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (=~) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (=~$) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (=~.) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val (=~.$) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (%) : (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t
val (%$) : (float, 'a) Owl_dense_ndarray_generic.t -> float -> (float, 'a) Owl_dense_ndarray_generic.t
val (**) : (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t
val ($**) : float -> (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t
val (**$) : (float, 'a) Owl_dense_ndarray_generic.t -> float -> (float, 'a) Owl_dense_ndarray_generic.t
val (+=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (-=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (*=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (/=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (+$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (-$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (*$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (/$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (.!{;..}) : ('a, 'b) Owl_dense_ndarray_generic.t -> Owl_types.index array -> ('a, 'b) Owl_dense_ndarray_generic.t
val (.!{;..}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> Owl_types.index array -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (.${}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list -> ('a, 'b) Owl_dense_ndarray_generic.t
val (.${;..}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list array -> ('a, 'b) Owl_dense_ndarray_generic.t
val (.${}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (.${;..}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list array -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
include sig ... end
val (.%{}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int -> 'a
val (.%{;..}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int array -> 'a
val (.%{}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int -> 'a -> unit
val (.%{;..}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int array -> 'a -> unit
val mpow : Owl_linalg_d.mat -> float -> Owl_linalg_d.mat
\ No newline at end of file diff --git a/owl/Owl_dense_ndarray/Generic/index.html b/owl/Owl_dense_ndarray/Generic/index.html deleted file mode 100644 index 80888c01b..000000000 --- a/owl/Owl_dense_ndarray/Generic/index.html +++ /dev/null @@ -1,805 +0,0 @@ - -Generic (owl.Owl_dense_ndarray.Generic)

Module Owl_dense_ndarray.Generic

include module type of struct include Owl_dense_ndarray_generic end

For the comparison of two complex numbers x and y, Owl uses the following conventions: 1) x and y are equal iff both real and imaginary parts are equal; 2) x is less than y if the magnitude of x is less than the magnitude of y; in case both x and y have the same magnitude, x is less than y if the phase of x is less than the phase of y; 3) the less-or-equal, greater, and greater-or-equal relations are defined on top of the aforementioned conventions.

The generic module supports operations for the following Bigarray element types: Int8_signed, Int8_unsigned, Int16_signed, Int16_unsigned, Int32, Int64, Float32, Float64, Complex32, Complex64.

Type definition
type ('a, 'b) t = ('a, 'b, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t

N-dimensional array type, i.e. Bigarray Genarray type.

type ('a, 'b) kind = ('a, 'b) Stdlib.Bigarray.kind

Type of the ndarray, e.g., Bigarray.Float32, Bigarray.Complex64, and etc.

Create Ndarrays
val empty : ('a, 'b) kind -> int array -> ('a, 'b) t

empty Bigarray.Float64 [|3;4;5|] creates a three-dimensional array of Bigarray.Float64 type. Each dimension has the following size: 3, 4, and 5. The elements in the array are not initialised; they can be any value. empty is faster than zeros for creating an ndarray.

The module only supports the following four types of ndarray: Bigarray.Float32, Bigarray.Float64, Bigarray.Complex32, and Bigarray.Complex64.

val create : ('a, 'b) kind -> int array -> 'a -> ('a, 'b) t

create Bigarray.Float64 [|3;4;5|] 2. creates a three-dimensional array of Bigarray.Float64 type. Each dimension has the following size: 3, 4, and 5. The elements in the array are initialised to 2.

val init : ('a, 'b) kind -> int array -> (int -> 'a) -> ('a, 'b) t

init Bigarray.Float64 d f creates an ndarray x of shape d, then uses f to initialise the elements in x. The input of f is the 1-dimensional index of the ndarray. You need to explicitly convert it if you need the N-dimensional index; the function ind can help you.

val init_nd : ('a, 'b) kind -> int array -> (int array -> 'a) -> ('a, 'b) t

init_nd is almost the same as init but f receives the n-dimensional index as input. It is more convenient since you don't have to convert the index yourself, but this also means init_nd is slower than init.
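A minimal sketch of the difference between the two index styles, assuming Owl is installed and the module is reachable as Owl.Dense.Ndarray.Generic (the module path and the use of Bigarray.Float64 are illustrative assumptions):

.. code-block:: ocaml

  module G = Owl.Dense.Ndarray.Generic

  (* init receives the flat (1-d) index: x holds 0., 1., ..., 5. in row-major order *)
  let x = G.init Bigarray.Float64 [|2; 3|] float_of_int

  (* init_nd receives the n-d index directly: element (i, j) of y equals i + j *)
  let y = G.init_nd Bigarray.Float64 [|2; 3|] (fun idx -> float_of_int (idx.(0) + idx.(1)))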

val zeros : ('a, 'b) kind -> int array -> ('a, 'b) t

zeros Bigarray.Complex32 [|3;4;5|] creates a three-dimensional array of Bigarray.Complex32 type. Each dimension has the following size: 3, 4, and 5. The elements in the array are initialised to "zero". Depending on the kind, zero can be 0. or Complex.zero.

val ones : ('a, 'b) kind -> int array -> ('a, 'b) t

ones Bigarray.Complex32 [|3;4;5|] creates a three-dimensional array of Bigarray.Complex32 type. Each dimension has the following size: 3, 4, and 5. The elements in the array are initialised to "one". Depending on the kind, one can be 1. or Complex.one.

val eye : ('a, 'b) kind -> int -> ('a, 'b) t

eye m creates an m by m identity matrix.

val uniform : ('a, 'b) kind -> ?a:'a -> ?b:'a -> int array -> ('a, 'b) t

uniform Bigarray.Float64 [|3;4;5|] creates a three-dimensional array of type Bigarray.Float64. Each dimension has the following size: 3, 4, and 5. The elements in the array follow a uniform distribution between a and b (0 and 1 by default).

val gaussian : ('a, 'b) kind -> ?mu:'a -> ?sigma:'a -> int array -> ('a, 'b) t

gaussian Float64 [|3;4;5|] ...

val poisson : ('a, 'b) kind -> mu:float -> int array -> ('a, 'b) t

poisson Float64 [|3;4;5|] ...

val sequential : ('a, 'b) kind -> ?a:'a -> ?step:'a -> int array -> ('a, 'b) t

sequential Bigarray.Float64 [|3;4;5|] creates a three-dimensional array of type Bigarray.Float64. Each dimension has the following size: 3, 4, and 5. The elements in the array are assigned sequential values.

?a specifies the starting value (the default is zero), whilst ?step specifies the step size (the default is one).
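For example, a hedged sketch (module path assumed, as above):

.. code-block:: ocaml

  module G = Owl.Dense.Ndarray.Generic

  (* elements 10., 12., 14., ... filled in row-major order *)
  let x = G.sequential Bigarray.Float64 ~a:10. ~step:2. [|3; 4; 5|]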

val linspace : ('a, 'b) kind -> 'a -> 'a -> int -> ('a, 'b) t

linspace k 0. 9. 10 ...

val logspace : ('a, 'b) kind -> ?base:float -> 'a -> 'a -> int -> ('a, 'b) t

logspace k 0. 9. 10 ...

val bernoulli : ('a, 'b) kind -> ?p:float -> int array -> ('a, 'b) t

bernoulli k ~p:0.3 [|2;3;4|]

val complex : ('a, 'b) kind -> ('c, 'd) kind -> ('a, 'b) t -> ('a, 'b) t -> ('c, 'd) t

complex re im constructs a complex ndarray/matrix from re and im. re and im contain the real and imaginary part of x respectively.

Note that both re and im can be complex but must have the same type. The real part of re will be the real part of x and the imaginary part of im will be the imaginary part of x.

val polar : ('a, 'b) kind -> ('c, 'd) kind -> ('a, 'b) t -> ('a, 'b) t -> ('c, 'd) t

polar rho theta constructs a complex ndarray/matrix from polar coordinates rho and theta. rho contains the magnitudes and theta contains the phase angles. Note that the behaviour is undefined if rho has negative elements or theta has infinite elements.

val unit_basis : ('a, 'b) kind -> int -> int -> ('a, 'b) t

unit_basis k n i returns a unit basis vector with the i-th element set to 1.

Obtain basic properties
val shape : ('a, 'b) t -> int array

shape x returns the shape of ndarray x.

val num_dims : ('a, 'b) t -> int

num_dims x returns the number of dimensions of ndarray x.

val nth_dim : ('a, 'b) t -> int -> int

nth_dim x i returns the size of the i-th dimension of x.

val numel : ('a, 'b) t -> int

numel x returns the number of elements in x.

val nnz : ('a, 'b) t -> int

nnz x returns the number of non-zero elements in x.

val density : ('a, 'b) t -> float

density x returns the percentage of non-zero elements in x.

val size_in_bytes : ('a, 'b) t -> int

size_in_bytes x returns the size of x in bytes in memory.

val same_shape : ('a, 'b) t -> ('c, 'd) t -> bool

same_shape x y checks whether x and y have the same shape or not.

val same_data : ('a, 'b) t -> ('a, 'b) t -> bool

same_data x y checks whether x and y share the same underlying data in the memory. Namely, both variables point to the same memory address. This is done by checking the Data pointer in the Bigarray structure.

This function is very useful for avoiding unnecessary copying between two ndarrays especially if one has been reshaped or sliced.

val kind : ('a, 'b) t -> ('a, 'b) kind

kind x returns the type of ndarray x. It is one of the four possible values: Bigarray.Float32, Bigarray.Float64, Bigarray.Complex32, and Bigarray.Complex64.

val strides : ('a, 'b) t -> int array

strides x calculates the strides of x. E.g., if x is of shape [|3;4;5|], the returned strides will be [|20;5;1|].

val slice_size : ('a, 'b) t -> int array

slice_size x calculates the slice size in each dimension. E.g., if x is of shape [|3;4;5|], the returned slice size will be [|60; 20; 5|].

val ind : ('a, 'b) t -> int -> int array

ind x i converts x's one-dimensional index i to an n-dimensional one.

val i1d : ('a, 'b) t -> int array -> int

i1d x i converts x's n-dimensional index i to a one-dimensional one.
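A small sketch tying ind and i1d to the strides described above (module path assumed):

.. code-block:: ocaml

  module G = Owl.Dense.Ndarray.Generic

  let x = G.zeros Bigarray.Float64 [|3; 4; 5|]   (* strides [|20; 5; 1|] *)
  let nd = G.ind x 27               (* [|1; 1; 2|], since 27 = 1*20 + 1*5 + 2 *)
  let flat = G.i1d x [|1; 1; 2|]    (* 27 *)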

Manipulate Ndarrays
val get : ('a, 'b) t -> int array -> 'a

get x i returns the value at i in x. E.g., get x [|0;2;1|] returns the value at [|0;2;1|] in x.

val set : ('a, 'b) t -> int array -> 'a -> unit

set x i a sets the value at i to a in x.

val get_index : ('a, 'b) t -> int array array -> 'a array

get_index x i returns an array of element values specified by the indices i. The length of array i equals the number of dimensions of x. The arrays in i must have the same length, and each represents the indices in that dimension.

E.g., [| [|1;2|]; [|3;4|] |] returns the value of elements at position (1,3) and (2,4) respectively.

val set_index : ('a, 'b) t -> int array array -> 'a array -> unit

set_index x i a sets the value of elements in x according to the indices specified by i. The length of array i equals the number of dimensions of x. The arrays in i must have the same length, and each represents the indices in that dimension.

If the length of a equals the number of specified indices, then each selected element is assigned the corresponding value in a. If the length of a equals one, then all the selected elements are assigned the same value.

val get_fancy : Owl_types.index list -> ('a, 'b) t -> ('a, 'b) t

get_fancy s x returns a copy of the slice in x. The slice is defined by s, which is a list of index definitions (see Owl_types.index). E.g., for an ndarray x of dimension [|2; 2; 3|], slice [0] x takes the slices of index (0,*,*), i.e., [|0;0;0|], [|0;0;1|], [|0;0;2|] ... Also note that if the length of s is less than the number of dimensions of x, the slice function will append slice definitions to the higher dimensions by assuming all the elements in the missing dimensions are taken.

Basically, the slice function offers very much the same semantics as numpy, i.e., the start:stop:step grammar, so if you know how to index and slice an ndarray in numpy, you should not find it difficult to use this function. Please just refer to the numpy documentation or my tutorial.

There are two differences between slice_left and slice: slice_left does not make a copy but simply moves the pointer; slice_left can only take a slice from the left-most axis, whereas slice is much more flexible and can work on arbitrary axes which need not start from the left-most side.
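As a hedged illustration (assuming the index constructors I, L and R come from Owl_types, and the module path as above):

.. code-block:: ocaml

  module G = Owl.Dense.Ndarray.Generic
  open Owl_types  (* assumed to provide the index constructors I, L and R *)

  let x = G.sequential Bigarray.Float64 [|3; 4; 5|]
  (* rows 0..1 on axis 0, the single index 2 on axis 1, the explicit list [4; 0] on axis 2 *)
  let s = G.get_fancy [ R [0; 1]; I 2; L [4; 0] ] x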

val set_fancy : Owl_types.index list -> ('a, 'b) t -> ('a, 'b) t -> unit

set_fancy axis x y sets the slice defined by axis in x according to the values in y. y must have the same shape as the one defined by axis.

About the slice definition of axis, please refer to get_fancy function.

val get_fancy_ext : Owl_types.index array -> ('a, 'b) t -> ('a, 'b) t

This function is used for the extended indexing operators available since OCaml 4.10.0. The indexing and slicing syntax becomes much lighter.

val set_fancy_ext : Owl_types.index array -> ('a, 'b) t -> ('a, 'b) t -> unit

This function is used for the extended indexing operators available since OCaml 4.10.0. The indexing and slicing syntax becomes much lighter.

val get_slice : int list list -> ('a, 'b) t -> ('a, 'b) t

get_slice axis x aims to provide a simpler version of get_fancy. This function assumes that every list element in the passed in int list list represents a range, i.e., R constructor.

E.g., [[];[0;3];[0]] is equivalent to [R []; R [0;3]; R [0]].
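A short sketch of such a range-based slice written with get_slice (module path assumed):

.. code-block:: ocaml

  module G = Owl.Dense.Ndarray.Generic

  let x = G.sequential Bigarray.Float64 [|3; 4; 5|]
  (* the empty list takes everything on axis 0; [0;3] is the range 0..3 on axis 1; [0] picks index 0 on axis 2 *)
  let s = G.get_slice [ []; [0; 3]; [0] ] x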

val set_slice : int list list -> ('a, 'b) t -> ('a, 'b) t -> unit

set_slice axis x y aims to provide a simpler version of set_fancy. This function assumes that every list element in the passed in int list list represents a range, i.e., R constructor.

E.g., [[];[0;3];[0]] is equivalent to [R []; R [0;3]; R [0]].

val get_slice_ext : int list array -> ('a, 'b) t -> ('a, 'b) t

get_slice_ext axis x is used for the extended indexing operators available since OCaml 4.10.0. The indexing and slicing syntax becomes much lighter.

E.g., x.%{0;1;2}.

val set_slice_ext : int list array -> ('a, 'b) t -> ('a, 'b) t -> unit

Similar to get_slice_ext axis x, this function is used for the extended indexing operators available since OCaml 4.10.0. The indexing and slicing syntax becomes much lighter.

val sub_left : ('a, 'b) t -> int -> int -> ('a, 'b) t

Same as Bigarray.sub_left, please refer to the Bigarray documentation.

val sub_ndarray : int array -> ('a, 'b) t -> ('a, 'b) t array

sub_ndarray parts x is similar to Bigarray.sub_left. It splits the passed-in ndarray x along axis 0 according to parts. The elements in parts do not need to be equal, but they must sum up to the dimension along axis zero.

The returned sub-ndarrays share the same memory as x. Because no copy is made, this function is much faster than using the `split` function to divide the lowest dimensionality of x.

val slice_left : ('a, 'b) t -> int array -> ('a, 'b) t

Same as Bigarray.slice_left, please refer to Bigarray documentation.

val reset : ('a, 'b) t -> unit

reset x resets all the elements in x to zero.

val fill : ('a, 'b) t -> 'a -> unit

fill x a assigns the value a to the elements in x.

val copy : ('a, 'b) t -> ('a, 'b) t

copy x makes a copy of x.

val resize : ?head:bool -> ('a, 'b) t -> int array -> ('a, 'b) t

resize ~head x d resizes the ndarray x. If there are fewer elements in the new shape than in the old one, the new ndarray shares part of the memory with the old x. head indicates the alignment between the new and old data, either from the head or from the tail. Note that the data is flattened before the operation.

If there are more elements in the new shape d, then new memory space will be allocated and the content of x will be copied to the new memory. The rest of the allocated space will be filled with zeros. The default value of head is true.

val reshape : ('a, 'b) t -> int array -> ('a, 'b) t

reshape x d transforms x into a new shape defined by d. Note that the reshape function does not make a copy of x; the returned ndarray shares the same memory with the original x.

One shape dimension (only one) can be set to -1. In this case, the value is inferred from the length of the array and remaining dimensions.
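For instance, a hedged sketch of the -1 inference (module path assumed):

.. code-block:: ocaml

  module G = Owl.Dense.Ndarray.Generic

  let x = G.sequential Bigarray.Float64 [|3; 4; 5|]   (* 60 elements in total *)
  (* -1 is inferred as 60 / (6 * 2) = 5, so y has shape [|6; 5; 2|] *)
  let y = G.reshape x [|6; -1; 2|]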

val flatten : ('a, 'b) t -> ('a, 'b) t

flatten x transforms x into a one-dimensional array without making a copy. Therefore the returned value shares the same memory space with the original x.

val reverse : ('a, 'b) t -> ('a, 'b) t

reverse x reverses the order of all elements in the flattened x and returns the result in a new ndarray. The original x remains intact.

val flip : ?axis:int -> ('a, 'b) t -> ('a, 'b) t

flip ~axis x flips a matrix/ndarray along axis. By default axis = 0. The result is returned in a new matrix/ndarray, so the original x remains intact.

val rotate : ('a, 'b) t -> int -> ('a, 'b) t

rotate x d rotates x clockwise by d degrees. d must be a multiple of 90, otherwise the function will fail. If x is an n-dimensional array, then the function rotates the plane formed by the first and second dimensions.

val transpose : ?axis:int array -> ('a, 'b) t -> ('a, 'b) t

transpose ~axis x makes a copy of x, then transposes it according to ~axis. ~axis must be a valid permutation of the dimension indices of x. E.g., for a three-dimensional ndarray, it can be [2;1;0], [0;2;1], [1;2;0], etc.

val swap : int -> int -> ('a, 'b) t -> ('a, 'b) t

swap i j x makes a copy of x, then swaps the data on axis i and j.

val tile : ('a, 'b) t -> int array -> ('a, 'b) t

tile x a tiles the data in x according to the repetition specified by a. This function provides exactly the same behaviour as numpy.tile; please refer to numpy's online documentation for details.

val repeat : ('a, 'b) t -> int array -> ('a, 'b) t

repeat x a repeats the elements of x according to the repetition specified by a. The i-th element of a specifies the number of times that the individual entries of the i-th dimension of x should be repeated.
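The following hedged sketch contrasts tile and repeat on the same input (module path assumed):

.. code-block:: ocaml

  module G = Owl.Dense.Ndarray.Generic

  let x = G.sequential Bigarray.Float64 [|2; 3|]
  (* tile stacks the whole block twice along axis 0: rows 0, 1, 0, 1 *)
  let t = G.tile x [|2; 1|]
  (* repeat duplicates each entry of axis 0 in place: rows 0, 0, 1, 1 *)
  let r = G.repeat x [|2; 1|]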

val concat_vertical : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

concat_vertical x y concatenates two ndarrays x and y vertically. This is just a convenient function for concatenating two ndarrays along their lowest dimension, i.e. 0.

The associated operator is @||, please refer to :doc:`owl_operator`.

val concat_horizontal : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

concat_horizontal x y concatenates two ndarrays x and y horizontally. This is just a convenient function for concatenating two ndarrays along their highest dimension.

The associated operator is @=, please refer to :doc:`owl_operator`.

val concat_vh : ('a, 'b) t array array -> ('a, 'b) t

concat_vh is used to assemble small parts of matrices into a bigger one. E.g. In [| [|a; b; c|]; [|d; e; f|]; [|g; h; i|] |], wherein `a, b, c ... i` are matrices of different shapes. They will be concatenated into a big matrix as follows.

.. math:: \begin{matrix} a & b & c \\ d & e & f \\ g & h & i \end{matrix}

This is achieved by first concatenating along axis:1 for each element in the array, then concatenating along axis:0. The number of elements in each array need not be equal as long as the aggregated dimensions match. E.g., please check the following example.

.. code-block:: ocaml

let a00 = Mat.sequential 2 3 in
let a01 = Mat.sequential 2 2 in
let a02 = Mat.sequential 2 1 in
let a10 = Mat.sequential 3 3 in
let a11 = Mat.sequential 3 3 in
Mat.concat_vh [| [|a00; a01; a02|]; [|a10; a11|] |];;

val concatenate : ?axis:int -> ('a, 'b) t array -> ('a, 'b) t

concatenate ~axis:2 x concatenates an array of ndarrays along the third dimension. For the ndarrays in x, they must have the same shape except the dimension specified by axis. The default value of axis is 0, i.e., the lowest dimension of a matrix/ndarray.

val stack : ?axis:int -> ('a, 'b) t array -> ('a, 'b) t

stack ~axis x stacks an array of ndarrays along the axis dimension. For example, if x contains K ndarrays of shape [|2;3|], then stack ~axis:1 x will return an ndarray of shape [|2;K;3|]. The ndarrays in x must all have the same shape. The default value of axis is 0.
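A hedged sketch contrasting concatenate and stack (module path assumed):

.. code-block:: ocaml

  module G = Owl.Dense.Ndarray.Generic

  let a = G.zeros Bigarray.Float64 [|2; 3|]
  let b = G.ones Bigarray.Float64 [|2; 3|]
  let c = G.concatenate ~axis:0 [|a; b|]   (* no new axis: shape [|4; 3|] *)
  let d = G.stack ~axis:1 [|a; b|]         (* a new axis is inserted: shape [|2; 2; 3|] *)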

val split : ?axis:int -> int array -> ('a, 'b) t -> ('a, 'b) t array

split ~axis parts x splits an ndarray x into parts along the specified axis. This function is the inverse operation of concatenate. The elements in parts must sum up to the dimension of x along the specified axis.

val split_vh : (int * int) array array -> ('a, 'b) t -> ('a, 'b) t array array

split_vh parts x splits a passed in ndarray x along the first two dimensions, i.e. axis 0 and axis 1. This is the inverse operation of concat_vh function, and the function is very useful in dividing a big matrix into smaller (especially heterogeneous) parts.

For example, given a matrix x of shape [|8;10|], it is possible to split in the following ways.

.. code-block:: ocaml

Mat.split_vh [| [|(8,5);(8,5)|] |] x;;
Mat.split_vh [| [|(4,5);(4,5)|]; [|(4,10)|] |] x;;
Mat.split_vh [| [|(4,5);(4,5)|]; [|(4,5);(4,5)|] |] x;;

val squeeze : ?axis:int array -> ('a, 'b) t -> ('a, 'b) t

squeeze ~axis x removes single-dimensional entries from the shape of x.

val expand : ?hi:bool -> ('a, 'b) t -> int -> ('a, 'b) t

expand x d reshapes x by increasing its rank from num_dims x to d. The opposite operation is squeeze x. The hi parameter specifies whether the expansion is along the high dimensions (by setting true) or along the low dimensions (by setting false). The default value is false.

val pad : ?v:'a -> int list list -> ('a, 'b) t -> ('a, 'b) t

pad ~v p x pads an ndarray x with a constant value v. The padding index p is a list of lists of 2 integers. These two integers denote the padding width at both edges of one dimension of x.

val dropout : ?rate:float -> ('a, 'b) t -> ('a, 'b) t

dropout ~rate:0.3 x drops out 30% of the elements in x; in other words, it sets their values to zero.

val top : ('a, 'b) t -> int -> int array array

top x n returns the indices of n greatest values of x. The indices are arranged according to the corresponding element values, from the greatest one to the smallest one.

val bottom : ('a, 'b) t -> int -> int array array

bottom x n returns the indices of n smallest values of x. The indices are arranged according to the corresponding element values, from the smallest one to the greatest one.

val sort1 : ?axis:int -> ('a, 'b) t -> ('a, 'b) t

sort1 ~axis x performs a quicksort of the elements along the specified axis in x. A new copy is returned as the result; the original x remains intact.

val sort : ('a, 'b) t -> ('a, 'b) t

sort x performs a quicksort of the elements in x. A new copy is returned as the result; the original x remains intact. If you want to perform in-place sorting, please use `sort_` instead.

val argsort : ('a, 'b) t -> (int64, Stdlib.Bigarray.int64_elt) t

argsort x returns the indices with which the elements in x are sorted in increasing order. Note that the returned index ndarray has the same shape as that of x, and the indices are 1D indices.

val draw : ?axis:int -> ('a, 'b) t -> int -> ('a, 'b) t * int array

draw ~axis x n draws n samples from x along the specified axis, with replacement. axis is set to zero by default. The return is a tuple of both samples and the indices of the selected samples.

val mmap : Unix.file_descr -> ?pos:int64 -> ('a, 'b) kind -> bool -> int array -> ('a, 'b) t

mmap fd kind layout shared dims ...

Iteration functions
val iteri : (int -> 'a -> unit) -> ('a, 'b) t -> unit

iteri f x applies function f to each element in x. Note that the 1-d index is passed to function f; you need to convert it to an n-d index yourself.

val iter : ('a -> unit) -> ('a, 'b) t -> unit

iter f x is similar to iteri f x, except the index is not passed to f.

val mapi : (int -> 'a -> 'a) -> ('a, 'b) t -> ('a, 'b) t

mapi f x makes a copy of x, then applies f to each element in x.

val map : ('a -> 'a) -> ('a, 'b) t -> ('a, 'b) t

map f x is similar to mapi f x except the index is not passed.

val foldi : ?axis:int -> (int -> 'a -> 'a -> 'a) -> 'a -> ('a, 'b) t -> ('a, 'b) t

foldi ~axis f a x folds (or reduces) the elements in x from the left along the specified axis using the passed-in function f. a is the initial element, and in f i acc b, acc is the accumulator and b is one of the elements in x along the same axis. Note that i is the 1-d index of b.

val fold : ?axis:int -> ('a -> 'a -> 'a) -> 'a -> ('a, 'b) t -> ('a, 'b) t

Similar to foldi, except that the index of an element is not passed to f.
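As a hedged sketch, summing each row with fold (module path assumed; the accumulator function here is ordinary float addition):

.. code-block:: ocaml

  module G = Owl.Dense.Ndarray.Generic

  let x = G.sequential Bigarray.Float64 [|2; 3|]
  (* reduce along axis 1 by summation, starting from 0. *)
  let row_sums = G.fold ~axis:1 ( +. ) 0. x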

val scani : ?axis:int -> (int -> 'a -> 'a -> 'a) -> ('a, 'b) t -> ('a, 'b) t

scani ~axis f x scans x along the specified axis using the passed-in function f. f i acc a returns an updated acc which is passed to the next call of f. This function can be used to implement accumulative operations such as the sum and prod functions. Note that i is the 1-d index of a in x.

val scan : ?axis:int -> ('a -> 'a -> 'a) -> ('a, 'b) t -> ('a, 'b) t

Similar to scani, except that the index of an element is not passed to f.

val filteri : (int -> 'a -> bool) -> ('a, 'b) t -> int array

filteri f x uses f to filter out certain elements in x. An element will be included if f returns true. The returned result is an array of 1-dimensional indices of the selected elements. To obtain the n-dimensional indices, you need to convert it manually with Owl's helper function.

val filter : ('a -> bool) -> ('a, 'b) t -> int array

Similar to filteri, but the indices are not passed to f.
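A minimal sketch of filter (module path assumed):

.. code-block:: ocaml

  module G = Owl.Dense.Ndarray.Generic

  let x = G.sequential Bigarray.Float64 [|2; 3|]   (* elements 0. .. 5. *)
  (* 1-d indices of elements strictly greater than 3., i.e. [|4; 5|] *)
  let idx = G.filter (fun a -> a > 3.) x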

val iter2i : (int -> 'a -> 'b -> unit) -> ('a, 'c) t -> ('b, 'd) t -> unit

Similar to iteri but applies to two N-dimensional arrays x and y. Both x and y must have the same shape.

val iter2 : ('a -> 'b -> unit) -> ('a, 'c) t -> ('b, 'd) t -> unit

Similar to iter2i, except that the index is not passed to f.

val map2i : (int -> 'a -> 'a -> 'a) -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

map2i f x y applies f to two elements of the same position in both x and y. Note that the 1-d index is passed to function f.

val map2 : ('a -> 'a -> 'a) -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

map2 f x y is similar to map2i f x y except the index is not passed.

val iteri_nd : (int array -> 'a -> unit) -> ('a, 'b) t -> unit

Similar to iteri but n-d indices are passed to the user function.

val mapi_nd : (int array -> 'a -> 'a) -> ('a, 'b) t -> ('a, 'b) t

Similar to mapi but n-d indices are passed to the user function.

val foldi_nd : ?axis:int -> (int array -> 'a -> 'a -> 'a) -> 'a -> ('a, 'b) t -> ('a, 'b) t

Similar to foldi but n-d indices are passed to the user function.

val scani_nd : ?axis:int -> (int array -> 'a -> 'a -> 'a) -> ('a, 'b) t -> ('a, 'b) t

Similar to scani but n-d indices are passed to the user function.

val filteri_nd : (int array -> 'a -> bool) -> ('a, 'b) t -> int array array

Similar to filteri but n-d indices are returned.

val iter2i_nd : (int array -> 'a -> 'c -> unit) -> ('a, 'b) t -> ('c, 'd) t -> unit

Similar to iter2i but n-d indices are passed to the user function.

val map2i_nd : (int array -> 'a -> 'a -> 'a) -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

Similar to map2i but n-d indices are passed to the user function.

val iteri_slice : ?axis:int -> (int -> ('a, 'b) t -> unit) -> ('a, 'b) t -> unit

iteri_slice ~axis f x iterates the slices along the specified axis in x and applies the function f. The 1-d index of the slice is passed in. By default, the axis is 0. Setting axis to the highest dimension is not allowed because in that case you can just use `iteri` to iterate all the elements in x, which is more efficient.

Note that the slice is obtained by slicing left (due to Owl's C-layout ndarray) a sub-array out of x. E.g., if x has shape [|3;4;5|], setting axis=0 will iterate three 4 x 5 matrices. The slice shares the same memory with x so no copy is made.

val iter_slice : ?axis:int -> (('a, 'b) t -> unit) -> ('a, 'b) t -> unit

Similar to iteri_slice but slice index is not passed in.

val mapi_slice : ?axis:int -> (int -> ('a, 'b) t -> 'c) -> ('a, 'b) t -> 'c array

mapi_slice ~axis f x maps the slices along the specified axis in x and applies the function f. By default, axis is 0. The index of the slice is passed in.

Please refer to iteri_slice for more details.
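For example, a hedged sketch that reduces every slice to its element sum with map_slice (module path assumed):

.. code-block:: ocaml

  module G = Owl.Dense.Ndarray.Generic

  let x = G.sequential Bigarray.Float64 [|3; 4; 5|]
  (* map each 4 x 5 slice along axis 0 to the sum of its elements; the result is a float array of length 3 *)
  let slice_sums = G.map_slice ~axis:0 G.sum' x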

val map_slice : ?axis:int -> (('a, 'b) t -> 'c) -> ('a, 'b) t -> 'c array

Similar to mapi_slice but slice index is not passed in.

val filteri_slice : ?axis:int -> (int -> ('a, 'b) t -> bool) -> ('a, 'b) t -> ('a, 'b) t array

filteri_slice ~axis f x filters the slices along the specified axis in x. The slices which satisfy the predicate f are returned in an array.

Please refer to iteri_slice for more details.

val filter_slice : ?axis:int -> (('a, 'b) t -> bool) -> ('a, 'b) t -> ('a, 'b) t array

Similar to filteri_slice but slice index is not passed in.

val foldi_slice : ?axis:int -> (int -> 'c -> ('a, 'b) t -> 'c) -> 'c -> ('a, 'b) t -> 'c

foldi_slice ~axis f a x folds (from left) the slices along the specified axis in x using the function f, with a as the initial accumulator value.

Please refer to iteri_slice for more details.

val fold_slice : ?axis:int -> ('c -> ('a, 'b) t -> 'c) -> 'c -> ('a, 'b) t -> 'c

Similar to foldi_slice but slice index is not passed in.

Examination & Comparison
val exists : ('a -> bool) -> ('a, 'b) t -> bool

exists f x checks all the elements in x using f. If at least one element satisfies f then the function returns true, otherwise false.

val not_exists : ('a -> bool) -> ('a, 'b) t -> bool

not_exists f x checks all the elements in x, the function returns true only if all the elements fail to satisfy f : float -> bool.

val for_all : ('a -> bool) -> ('a, 'b) t -> bool

for_all f x checks all the elements in x, the function returns true if and only if all the elements pass the check of function f.

val is_zero : ('a, 'b) t -> bool

is_zero x returns true if all the elements in x are zeros.

val is_positive : ('a, 'b) t -> bool

is_positive x returns true if all the elements in x are positive.

val is_negative : ('a, 'b) t -> bool

is_negative x returns true if all the elements in x are negative.

val is_nonpositive : ('a, 'b) t -> bool

is_nonpositive returns true if all the elements in x are non-positive.

val is_nonnegative : ('a, 'b) t -> bool

is_nonnegative returns true if all the elements in x are non-negative.

val is_normal : ('a, 'b) t -> bool

is_normal x returns true if all the elements in x are normal float numbers, i.e., not NaN, not INF, not SUBNORMAL. Please refer to

https://www.gnu.org/software/libc/manual/html_node/Floating-Point-Classes.html https://www.gnu.org/software/libc/manual/html_node/Infinity-and-NaN.html#Infinity-and-NaN

val not_nan : ('a, 'b) t -> bool

not_nan x returns false if there is any NaN element in x. Otherwise, the function returns true indicating all the numbers in x are not NaN.

val not_inf : ('a, 'b) t -> bool

not_inf x returns false if there is any positive or negative INF element in x. Otherwise, the function returns true.

val equal : ('a, 'b) t -> ('a, 'b) t -> bool

equal x y returns true if two matrices x and y are equal.

val not_equal : ('a, 'b) t -> ('a, 'b) t -> bool

not_equal x y returns true if there is at least one element in x that is not equal to the corresponding element in y.

val greater : ('a, 'b) t -> ('a, 'b) t -> bool

greater x y returns true if all the elements in x are greater than the corresponding elements in y.

val less : ('a, 'b) t -> ('a, 'b) t -> bool

less x y returns true if all the elements in x are smaller than the corresponding elements in y.

val greater_equal : ('a, 'b) t -> ('a, 'b) t -> bool

greater_equal x y returns true if all the elements in x are not smaller than the corresponding elements in y.

val less_equal : ('a, 'b) t -> ('a, 'b) t -> bool

less_equal x y returns true if all the elements in x are not greater than the corresponding elements in y.

val elt_equal : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

elt_equal x y performs element-wise = comparison of x and y. Assume that a is an element from x and b is the corresponding element from y at the same position. The function returns another binary (0 and 1) ndarray/matrix wherein 1 indicates a = b.

The function supports broadcast operation.

val elt_not_equal : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

elt_not_equal x y performs element-wise != comparison of x and y. Assume that a is an element from x and b is the corresponding element from y at the same position. The function returns another binary (0 and 1) ndarray/matrix wherein 1 indicates a <> b.

The function supports broadcast operation.

val elt_less : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

elt_less x y performs element-wise < comparison of x and y. Assume that a is an element from x and b is the corresponding element from y at the same position. The function returns another binary (0 and 1) ndarray/matrix wherein 1 indicates a < b.

The function supports broadcast operation.

val elt_greater : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

elt_greater x y performs element-wise > comparison of x and y. Assume that a is an element from x and b is the corresponding element from y at the same position. The function returns another binary (0 and 1) ndarray/matrix wherein 1 indicates a > b.

The function supports broadcast operation.

val elt_less_equal : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

elt_less_equal x y performs element-wise <= comparison of x and y. Assume that a is an element from x and b is the corresponding element from y at the same position. The function returns another binary (0 and 1) ndarray/matrix wherein 1 indicates a <= b.

The function supports broadcast operation.

val elt_greater_equal : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

elt_greater_equal x y performs element-wise >= comparison of x and y. Assume that a is an element from x and b is the corresponding element from y at the same position. The function returns another binary (0 and 1) ndarray/matrix wherein 1 indicates a >= b.

The function supports broadcast operation.

val equal_scalar : ('a, 'b) t -> 'a -> bool

equal_scalar x a checks if all the elements in x are equal to a. The function returns true iff for every element b in x, b = a.

val not_equal_scalar : ('a, 'b) t -> 'a -> bool

not_equal_scalar x a checks if all the elements in x are not equal to a. The function returns true iff for every element b in x, b <> a.

val less_scalar : ('a, 'b) t -> 'a -> bool

less_scalar x a checks if all the elements in x are less than a. The function returns true iff for every element b in x, b < a.

val greater_scalar : ('a, 'b) t -> 'a -> bool

greater_scalar x a checks if all the elements in x are greater than a. The function returns true iff for every element b in x, b > a.

val less_equal_scalar : ('a, 'b) t -> 'a -> bool

less_equal_scalar x a checks if all the elements in x are less or equal to a. The function returns true iff for every element b in x, b <= a.

val greater_equal_scalar : ('a, 'b) t -> 'a -> bool

greater_equal_scalar x a checks if all the elements in x are greater or equal to a. The function returns true iff for every element b in x, b >= a.

val elt_equal_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

elt_equal_scalar x a performs element-wise = comparison of x and a. Assume that b is one element from x. The function returns another binary (0 and 1) ndarray/matrix wherein 1 at the corresponding position indicates a = b, otherwise 0.

val elt_not_equal_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

elt_not_equal_scalar x a performs element-wise != comparison of x and a. Assume that b is one element from x. The function returns another binary (0 and 1) ndarray/matrix wherein 1 at the corresponding position indicates a <> b, otherwise 0.

val elt_less_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

elt_less_scalar x a performs element-wise < comparison of x and a. Assume that b is one element from x. The function returns another binary (0 and 1) ndarray/matrix wherein 1 at the corresponding position indicates a < b, otherwise 0.

val elt_greater_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

elt_greater_scalar x a performs element-wise > comparison of x and a. Assume that b is one element from x. The function returns another binary (0 and 1) ndarray/matrix wherein 1 at the corresponding position indicates a > b, otherwise 0.

val elt_less_equal_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

elt_less_equal_scalar x a performs element-wise <= comparison of x and a. Assume that b is one element from x. The function returns another binary (0 and 1) ndarray/matrix wherein 1 at the corresponding position indicates a <= b, otherwise 0.

val elt_greater_equal_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

elt_greater_equal_scalar x a performs element-wise >= comparison of x and a. Assume that b is one element from x. The function returns another binary (0 and 1) ndarray/matrix wherein 1 at the corresponding position indicates a >= b, otherwise 0.

val approx_equal : ?eps:float -> ('a, 'b) t -> ('a, 'b) t -> bool

approx_equal ~eps x y returns true if x and y are approximately equal, i.e., for any two elements a from x and b from y, we have abs (a - b) < eps. For complex numbers, the eps applies to both real and imaginary part.

Note: the threshold check is exclusive for passed in eps, i.e., the threshold interval is (a-eps, a+eps).

val approx_equal_scalar : ?eps:float -> ('a, 'b) t -> 'a -> bool

approx_equal_scalar ~eps x a returns true if all the elements in x are approximately equal to a, i.e., abs (x - a) < eps. For complex numbers, the eps applies to both real and imaginary parts.

Note: the threshold check is exclusive for the passed in eps.

val approx_elt_equal : ?eps:float -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

approx_elt_equal ~eps x y compares the element-wise equality of x and y, then returns another binary (i.e., 0 and 1) ndarray/matrix wherein 1 indicates that two corresponding elements a from x and b from y are considered as approximately equal, namely abs (a - b) < eps.

val approx_elt_equal_scalar : ?eps:float -> ('a, 'b) t -> 'a -> ('a, 'b) t

approx_elt_equal_scalar ~eps x a compares all the elements of x to a scalar value a, then returns another binary (i.e., 0 and 1) ndarray/matrix wherein 1 indicates that the element b from x is considered as approximately equal to a, namely abs (a - b) < eps.

Input/Output functions
val of_array : ('a, 'b) kind -> 'a array -> int array -> ('a, 'b) t

of_array k x d takes an array x and converts it into an ndarray of type k and shape d.

val to_array : ('a, 'b) t -> 'a array

to_array x converts an ndarray x to OCaml's array type. Note that the ndarray x is flattened before conversion.
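A round-trip sketch of of_array and to_array (module path assumed):

.. code-block:: ocaml

  module G = Owl.Dense.Ndarray.Generic

  (* build a 2 x 3 ndarray from a flat OCaml array, then flatten it back *)
  let x = G.of_array Bigarray.Float64 [|1.; 2.; 3.; 4.; 5.; 6.|] [|2; 3|]
  let a = G.to_array x   (* [|1.; 2.; 3.; 4.; 5.; 6.|] *)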

val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:('a -> string) -> ('a, 'b) t -> unit

print x prints all the elements in x as well as their indices. max_row and max_col specify the maximum number of rows and columns to display. header specifies whether or not to print out the headers. fmt is the function to format every element into string.

val pp_dsnda : Stdlib.Format.formatter -> ('a, 'b) t -> unit

pp_dsnda x prints x in OCaml toplevel. If the ndarray is too long, pp_dsnda only prints out parts of the ndarray.

val save : out:string -> ('a, 'b) t -> unit

save ~out x serialises an ndarray x to a file of name out.

val load : ('a, 'b) kind -> string -> ('a, 'b) t

load k s loads previously serialised ndarray from file s into memory. It is necessary to specify the type of the ndarray with parameter k.

val save_npy : out:string -> ('a, 'b) t -> unit

save_npy ~out x saves the matrix x into an npy file out. This function is implemented using npy-ocaml https://github.com/LaurentMazare/npy-ocaml.

val load_npy : ('a, 'b) kind -> string -> ('a, 'b) t

load_npy k file loads an npy file into a matrix of type k. If the matrix in the file is not of type k, it fails with [file]: incorrect format. This function is implemented using npy-ocaml https://github.com/LaurentMazare/npy-ocaml.

Unary math operators
val re_c2s : (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt) t -> (float, Stdlib.Bigarray.float32_elt) t

re_c2s x returns all the real components of x in a new ndarray of the same shape.

val re_z2d : (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt) t -> (float, Stdlib.Bigarray.float64_elt) t

re_z2d x returns all the real components of x in a new ndarray of the same shape.

val im_c2s : (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt) t -> (float, Stdlib.Bigarray.float32_elt) t

im_c2s x returns all the imaginary components of x in a new ndarray of the same shape.

val im_z2d : (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt) t -> (float, Stdlib.Bigarray.float64_elt) t

im_z2d x returns all the imaginary components of x in a new ndarray of the same shape.

val sum : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

sum ~axis x sums the elements in x along specified axis.

val sum' : ('a, 'b) t -> 'a

sum' x returns the sum of all elements in x.

val sum_reduce : ?axis:int array -> ('a, 'b) t -> ('a, 'b) t

sum_reduce ~axis x sums the elements in x along multiple axes specified in the axis array.

val prod : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

prod ~axis x multiplies the elements in x along the specified axis.

val prod' : ('a, 'b) t -> 'a

prod' x returns the product of all elements in x.

val mean : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

mean ~axis x calculates the mean along specified axis.

val mean' : ('a, 'b) t -> 'a

mean' x calculates the mean of all the elements in x.

val median : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

median ~axis x calculates the median along specified axis of x.

val median' : ('a, 'b) t -> 'a

median x calculates the median of a flattened version of x.

val var : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

var ~axis x calculates the variance along specified axis.

val var' : ('a, 'b) t -> 'a

var' x calculates the variance of all the elements in x.

val std : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

std ~axis calculates the standard deviation along specified axis.

val std' : ('a, 'b) t -> 'a

std' x calculates the standard deviation of all the elements in x.

val sem : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

sem ~axis calculates the standard error of mean along specified axis.

val sem' : ('a, 'b) t -> 'a

sem' x calculates the standard error of mean of all the elements in x.

val min : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

min x returns the minimum of all elements in x along specified axis. If no axis is specified, x will be flattened and the minimum of all the elements will be returned. For two complex numbers, the one with the smaller magnitude will be selected. If two magnitudes are the same, the one with the smaller phase will be selected.

val min' : ('a, 'b) t -> 'a

min' x is similar to min but returns the minimum of all elements in x in scalar value.

val max : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

max x returns the maximum of all elements in x along specified axis. If no axis is specified, x will be flattened and the maximum of all the elements will be returned. For two complex numbers, the one with the greater magnitude will be selected. If two magnitudes are the same, the one with the greater phase will be selected.

val max' : ('a, 'b) t -> 'a

max' x is similar to max but returns the maximum of all elements in x in scalar value.

val minmax : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t * ('a, 'b) t

minmax x returns (min_v, max_v), where min_v is the minimum value in x while max_v is the maximum.

val minmax' : ('a, 'b) t -> 'a * 'a

minmax' x returns (min_v, max_v), min_v is the minimum value in x while max_v is the maximum.

val min_i : ('a, 'b) t -> 'a * int array

min_i x returns the minimum of all elements in x as well as its index.

val max_i : ('a, 'b) t -> 'a * int array

max_i x returns the maximum of all elements in x as well as its index.

val minmax_i : ('a, 'b) t -> ('a * int array) * ('a * int array)

minmax_i x returns ((min_v,min_i), (max_v,max_i)) where (min_v,min_i) is the minimum value in x along with its index while (max_v,max_i) is the maximum value along with its index.

val abs : ('a, 'b) t -> ('a, 'b) t

abs x returns the absolute value of all elements in x in a new ndarray.

val abs_c2s : (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt) t -> (float, Stdlib.Bigarray.float32_elt) t

abs_c2s x is similar to abs but takes complex32 as input.

val abs_z2d : (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt) t -> (float, Stdlib.Bigarray.float64_elt) t

abs_z2d x is similar to abs but takes complex64 as input.

val abs2 : ('a, 'b) t -> ('a, 'b) t

abs2 x returns the square of the absolute value of all elements in x in a new ndarray.

val abs2_c2s : (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt) t -> (float, Stdlib.Bigarray.float32_elt) t

abs2_c2s x is similar to abs2 but takes complex32 as input.

val abs2_z2d : (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt) t -> (float, Stdlib.Bigarray.float64_elt) t

abs2_z2d x is similar to abs2 but takes complex64 as input.

val conj : ('a, 'b) t -> ('a, 'b) t

conj x returns the conjugate of the complex x.

val neg : ('a, 'b) t -> ('a, 'b) t

neg x negates the elements in x and returns the result in a new ndarray.

val reci : ('a, 'b) t -> ('a, 'b) t

reci x computes the reciprocal of every element in x and returns the result in a new ndarray.

val reci_tol : ?tol:'a -> ('a, 'b) t -> ('a, 'b) t

reci_tol ~tol x computes the reciprocal of every element in x. Different from reci, reci_tol sets the elements whose absolute values are smaller than tol to zeros. If tol is not specified, the default Owl_utils.eps Float32 will be used. For complex numbers, refer to Owl's documentation to see how comparison is defined.

val signum : (float, 'a) t -> (float, 'a) t

signum computes the sign value (-1 for negative numbers, 0 (or -0) for zero, 1 for positive numbers, nan for nan).

val sqr : ('a, 'b) t -> ('a, 'b) t

sqr x computes the square of the elements in x and returns the result in a new ndarray.

val sqrt : ('a, 'b) t -> ('a, 'b) t

sqrt x computes the square root of the elements in x and returns the result in a new ndarray.

val cbrt : ('a, 'b) t -> ('a, 'b) t

cbrt x computes the cubic root of the elements in x and returns the result in a new ndarray.

val exp : ('a, 'b) t -> ('a, 'b) t

exp x computes the exponential of the elements in x and returns the result in a new ndarray.

val exp2 : ('a, 'b) t -> ('a, 'b) t

exp2 x computes the base-2 exponential of the elements in x and returns the result in a new ndarray.

val exp10 : ('a, 'b) t -> ('a, 'b) t

exp10 x computes the base-10 exponential of the elements in x and returns the result in a new ndarray.

val expm1 : ('a, 'b) t -> ('a, 'b) t

expm1 x computes exp x -. 1. of the elements in x and returns the result in a new ndarray.

val log : ('a, 'b) t -> ('a, 'b) t

log x computes the logarithm of the elements in x and returns the result in a new ndarray.

val log10 : ('a, 'b) t -> ('a, 'b) t

log10 x computes the base-10 logarithm of the elements in x and returns the result in a new ndarray.

val log2 : ('a, 'b) t -> ('a, 'b) t

log2 x computes the base-2 logarithm of the elements in x and returns the result in a new ndarray.

val log1p : ('a, 'b) t -> ('a, 'b) t

log1p x computes log (1 + x) of the elements in x and returns the result in a new ndarray.

val sin : ('a, 'b) t -> ('a, 'b) t

sin x computes the sine of the elements in x and returns the result in a new ndarray.

val cos : ('a, 'b) t -> ('a, 'b) t

cos x computes the cosine of the elements in x and returns the result in a new ndarray.

val tan : ('a, 'b) t -> ('a, 'b) t

tan x computes the tangent of the elements in x and returns the result in a new ndarray.

val asin : ('a, 'b) t -> ('a, 'b) t

asin x computes the arc sine of the elements in x and returns the result in a new ndarray.

val acos : ('a, 'b) t -> ('a, 'b) t

acos x computes the arc cosine of the elements in x and returns the result in a new ndarray.

val atan : ('a, 'b) t -> ('a, 'b) t

atan x computes the arc tangent of the elements in x and returns the result in a new ndarray.

val sinh : ('a, 'b) t -> ('a, 'b) t

sinh x computes the hyperbolic sine of the elements in x and returns the result in a new ndarray.

val cosh : ('a, 'b) t -> ('a, 'b) t

cosh x computes the hyperbolic cosine of the elements in x and returns the result in a new ndarray.

val tanh : ('a, 'b) t -> ('a, 'b) t

tanh x computes the hyperbolic tangent of the elements in x and returns the result in a new ndarray.

val asinh : ('a, 'b) t -> ('a, 'b) t

asinh x computes the hyperbolic arc sine of the elements in x and returns the result in a new ndarray.

val acosh : ('a, 'b) t -> ('a, 'b) t

acosh x computes the hyperbolic arc cosine of the elements in x and returns the result in a new ndarray.

val atanh : ('a, 'b) t -> ('a, 'b) t

atanh x computes the hyperbolic arc tangent of the elements in x and returns the result in a new ndarray.

val floor : ('a, 'b) t -> ('a, 'b) t

floor x computes the floor of the elements in x and returns the result in a new ndarray.

val ceil : ('a, 'b) t -> ('a, 'b) t

ceil x computes the ceiling of the elements in x and returns the result in a new ndarray.

val round : ('a, 'b) t -> ('a, 'b) t

round x rounds the elements in x and returns the result in a new ndarray.

val trunc : ('a, 'b) t -> ('a, 'b) t

trunc x computes the truncation of the elements in x and returns the result in a new ndarray.

val fix : ('a, 'b) t -> ('a, 'b) t

fix x rounds each element of x to the nearest integer toward zero. For positive elements, the behavior is the same as floor. For negative ones, the behavior is the same as ceil.

val modf : ('a, 'b) t -> ('a, 'b) t * ('a, 'b) t

modf x performs modf over all the elements in x; the fractional part is saved in the first element of the returned tuple whereas the integer part is saved in the second element.

val erf : (float, 'a) t -> (float, 'a) t

erf x computes the error function of the elements in x and returns the result in a new ndarray.

val erfc : (float, 'a) t -> (float, 'a) t

erfc x computes the complementary error function of the elements in x and returns the result in a new ndarray.

val logistic : (float, 'a) t -> (float, 'a) t

logistic x computes the logistic function 1 / (1 + exp(-x)) of the elements in x and returns the result in a new ndarray.

val relu : (float, 'a) t -> (float, 'a) t

relu x computes the rectified linear unit function max(x, 0) of the elements in x and returns the result in a new ndarray.

val elu : ?alpha:float -> (float, 'a) t -> (float, 'a) t

elu alpha x computes the exponential linear unit function x >= 0. ? x : (alpha * (exp(x) - 1)) of the elements in x and returns the result in a new ndarray.

val leaky_relu : ?alpha:float -> (float, 'a) t -> (float, 'a) t

leaky_relu alpha x computes the leaky rectified linear unit function x >= 0. ? x : (alpha * x) of the elements in x and returns the result in a new ndarray.

val softplus : (float, 'a) t -> (float, 'a) t

softplus x computes the softplus function log(1 + exp(x)) of the elements in x and returns the result in a new ndarray.

val softsign : (float, 'a) t -> (float, 'a) t

softsign x computes the softsign function x / (1 + abs(x)) of the elements in x and returns the result in a new ndarray.

val softmax : ?axis:int -> (float, 'a) t -> (float, 'a) t

softmax x computes the softmax function (exp x) / (sum (exp x)) of all the elements along the specified axis in x and returns the result in a new ndarray.

By default, axis = -1, i.e. along the highest dimension.

val sigmoid : (float, 'a) t -> (float, 'a) t

sigmoid x computes the sigmoid function 1 / (1 + exp (-x)) for each element in x.

val log_sum_exp' : (float, 'a) t -> float

log_sum_exp' x computes the logarithm of the sum of exponentials of all the elements in x.

val log_sum_exp : ?axis:int -> ?keep_dims:bool -> (float, 'a) t -> (float, 'a) t

log_sum_exp ~axis x computes the logarithm of the sum of exponentials of all the elements in x along axis axis.

val l1norm : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

l1norm x calculates the l1-norm of x along the specified axis.

val l1norm' : ('a, 'b) t -> 'a

l1norm' x calculates the l1-norm of all the elements in x.

val l2norm : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

l2norm x calculates the l2-norm of x along the specified axis.

val l2norm' : ('a, 'b) t -> 'a

l2norm' x calculates the l2-norm of all the elements in x.

val l2norm_sqr : ?axis:int -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

l2norm_sqr x calculates the squared l2-norm of x along the specified axis.

val l2norm_sqr' : ('a, 'b) t -> 'a

l2norm_sqr' x calculates the square of the l2-norm (or l2norm, Euclidean norm) of all elements in x. The function uses the conjugate transpose in the product, hence it always returns a float number.

val vecnorm : ?axis:int -> ?p:float -> ?keep_dims:bool -> ('a, 'b) t -> ('a, 'b) t

vecnorm ~axis ~p x calculates the generalised vector p-norm along the specified axis. The generalised p-norm is defined as below.

.. math:: ||v||_p = \Big( \sum_{k=0}^{N-1} |v_k|^p \Big)^{1/p}

Parameters: * axis is the axis for reduction. * p is the order of the norm, default value is 2. * x is the input ndarray.

Returns: * If p = infinity, then returns :math:`||v||_{\infty} = \max_i(|v(i)|)`. * If p = -infinity, then returns :math:`||v||_{-\infty} = \min_i(|v(i)|)`. * Otherwise returns the generalised vector p-norm defined above.

val vecnorm' : ?p:float -> ('a, 'b) t -> 'a

vecnorm' flattens the input into a 1-d vector first, then calculates the generalised p-norm in the same way as vecnorm.
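A hedged sketch of both forms (module path assumed; infinity is OCaml's standard float infinity):

.. code-block:: ocaml

  module G = Owl.Dense.Ndarray.Generic

  let x = G.sequential Bigarray.Float64 [|3; 4|]   (* elements 0. .. 11. *)
  let row_l1 = G.vecnorm ~axis:1 ~p:1. x           (* l1-norm of each row *)
  let max_abs = G.vecnorm' ~p:infinity x           (* the infinity norm, i.e. 11. *)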

val cumsum : ?axis:int -> ('a, 'b) t -> ('a, 'b) t

cumsum ~axis x : performs cumulative sum of the elements along the given axis ~axis. If ~axis is None, then the cumsum is performed along the lowest dimension. The returned result however always remains the same shape.

val cumprod : ?axis:int -> ('a, 'b) t -> ('a, 'b) t

cumprod ~axis x : similar to cumsum but performs cumulative product of the elements along the given ~axis.

val cummin : ?axis:int -> ('a, 'b) t -> ('a, 'b) t

cummin ~axis x : performs cumulative min along axis dimension.

val cummax : ?axis:int -> ('a, 'b) t -> ('a, 'b) t

cummax ~axis x : performs cumulative max along axis dimension.

val diff : ?axis:int -> ?n:int -> ('a, 'b) t -> ('a, 'b) t

diff ~axis ~n x calculates the n-th difference of x along the specified axis.

Parameters: * axis: axis to calculate the difference. The default value is the highest dimension. * n: how many times to calculate the difference. The default value is 1.

Return: * The difference ndarray y. Note that the shape of y is 1 less than that of x along the specified axis.

val angle : (Stdlib.Complex.t, 'a) t -> (Stdlib.Complex.t, 'a) t

angle x calculates the phase angle of all complex numbers in x.

val proj : (Stdlib.Complex.t, 'a) t -> (Stdlib.Complex.t, 'a) t

proj x computes the projection on the Riemann sphere of all elements in x.

val lgamma : ('a, 'b) t -> ('a, 'b) t

lgamma x computes the loggamma of the elements in x and returns the result in a new ndarray.

val dawsn : ('a, 'b) t -> ('a, 'b) t

dawsn x computes the Dawson function of the elements in x and returns the result in a new ndarray.

val i0 : ('a, 'b) t -> ('a, 'b) t

i0 x computes the modified Bessel function of order 0 of the elements in x and returns the result in a new ndarray.

val i0e : ('a, 'b) t -> ('a, 'b) t

i0e x computes the exponentially scaled modified Bessel function of order 0 of the elements in x and returns the result in a new ndarray.

val i1 : ('a, 'b) t -> ('a, 'b) t

i1 x computes the modified Bessel function of order 1 of the elements in x and returns the result in a new ndarray.

val i1e : ('a, 'b) t -> ('a, 'b) t

i1e x computes the exponentially scaled modified Bessel function of order 1 of the elements in x and returns the result in a new ndarray.

val iv : v:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

iv v x computes modified Bessel function of x of real order v

val scalar_iv : v:'a -> ('a, 'b) t -> ('a, 'b) t

scalar_iv v x computes the modified Bessel function of x of real order v.

val iv_scalar : v:('a, 'b) t -> 'a -> ('a, 'b) t

iv_scalar v x computes modified Bessel function of x of real order v

val j0 : ('a, 'b) t -> ('a, 'b) t

j0 x computes the Bessel function of order 0 of the elements in x and returns the result in a new ndarray.

val j1 : ('a, 'b) t -> ('a, 'b) t

j1 x computes the Bessel function of order 1 of the elements in x and returns the result in a new ndarray.

val jv : v:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

jv v x computes the Bessel function of the first kind of x of real order v.

val scalar_jv : v:'a -> ('a, 'b) t -> ('a, 'b) t

scalar_jv v x computes the Bessel function of the first kind of x of real order v.

val jv_scalar : v:('a, 'b) t -> 'a -> ('a, 'b) t

jv_scalar v x computes the Bessel function of the first kind of x of real order v.

Binary math operators
val add : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

add x y adds all the elements in x and y elementwise, and returns the result in a new ndarray.

General broadcast operation is automatically applied to add/sub/mul/div, etc. The function compares the dimension element-wise from the highest to the lowest with the following broadcast rules (same as numpy): 1. equal; 2. either is 1.
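
A minimal sketch of the broadcast rules applied to add, assuming Owl.Dense.Ndarray.D; the alias N and the shapes are illustrative only.

  module N = Owl.Dense.Ndarray.D

  let () =
    let x = N.ones [|3; 1|] in
    let y = N.sequential [|1; 4|] in         (* [[0; 1; 2; 3]] *)
    let z = N.add x y in                     (* shapes [|3;1|] and [|1;4|] broadcast to [|3;4|] *)
    N.print z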

val sub : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

sub x y subtracts all the elements in x and y elementwise, and returns the result in a new ndarray.

val mul : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

mul x y multiplies all the elements in x and y elementwise, and returns the result in a new ndarray.

val div : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

div x y divides all the elements in x and y elementwise, and returns the result in a new ndarray.

val add_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

add_scalar x a adds a scalar value a to each element in x, and returns the result in a new ndarray.

val sub_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

sub_scalar x a subtracts a scalar value a from each element in x, and returns the result in a new ndarray.

val mul_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

mul_scalar x a multiplies each element in x by a scalar value a, and returns the result in a new ndarray.

val div_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

div_scalar x a divides each element in x by a scalar value a, and returns the result in a new ndarray.

val scalar_add : 'a -> ('a, 'b) t -> ('a, 'b) t

scalar_add a x adds a scalar value a to each element in x, and returns the result in a new ndarray.

val scalar_sub : 'a -> ('a, 'b) t -> ('a, 'b) t

scalar_sub a x subtracts each element in x from a scalar value a, and returns the result in a new ndarray.

val scalar_mul : 'a -> ('a, 'b) t -> ('a, 'b) t

scalar_mul a x multiplies each element in x by a scalar value a, and returns the result in a new ndarray.

val scalar_div : 'a -> ('a, 'b) t -> ('a, 'b) t

scalar_div a x divides a scalar value a by each element in x, and returns the result in a new ndarray.
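
To make the operand order of the scalar variants concrete, a minimal sketch contrasting div_scalar with scalar_div, assuming Owl.Dense.Ndarray.D; the alias N and the values are illustrative only.

  module N = Owl.Dense.Ndarray.D

  let () =
    let x = N.create [|3|] 2. in
    N.print (N.div_scalar x 4.);             (* each element divided by 4 : 0.5 0.5 0.5 *)
    N.print (N.scalar_div 4. x)              (* 4 divided by each element : 2.  2.  2.  *)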

val pow : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

pow x y computes pow(a, b) of all the elements in x and y elementwise, and returns the result in a new ndarray.

val scalar_pow : 'a -> ('a, 'b) t -> ('a, 'b) t

scalar_pow a x raises the scalar value a to the power of each element in x, and returns the result in a new ndarray.

val pow_scalar : ('a, 'b) t -> 'a -> ('a, 'b) t

pow_scalar x a raises each element in x to the power a, and returns the result in a new ndarray.

val atan2 : (float, 'a) t -> (float, 'a) t -> (float, 'a) t

atan2 x y computes atan2(a, b) of all the elements in x and y elementwise, and returns the result in a new ndarray.

val scalar_atan2 : float -> (float, 'a) t -> (float, 'a) t

scalar_atan2 a x

val atan2_scalar : (float, 'a) t -> float -> (float, 'a) t

atan2_scalar x a

val hypot : (float, 'a) t -> (float, 'a) t -> (float, 'a) t

hypot x y computes sqrt(x*x + y*y) of all the elements in x and y elementwise, and returns the result in a new ndarray.

val min2 : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

min2 x y computes the minimum of all the elements in x and y elementwise, and returns the result in a new ndarray.

val max2 : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

max2 x y computes the maximum of all the elements in x and y elementwise, and returns the result in a new ndarray.

val fmod : (float, 'a) t -> (float, 'a) t -> (float, 'a) t

fmod x y performs float mod division.

val fmod_scalar : (float, 'a) t -> float -> (float, 'a) t

fmod_scalar x a performs mod division between x and scalar a.

val scalar_fmod : float -> (float, 'a) t -> (float, 'a) t

scalar_fmod a x performs mod division between scalar a and x.

val ssqr' : ('a, 'b) t -> 'a -> 'a

ssqr' x a computes the sum of squared differences of all the elements in x from constant a. This function only computes the square of each element rather than the conjugate transpose as l2norm_sqr does.

val ssqr_diff' : ('a, 'b) t -> ('a, 'b) t -> 'a

ssqr_diff' x y computes the sum of squared differences of every element in x and its corresponding element in y.

val cross_entropy' : (float, 'a) t -> (float, 'a) t -> float

cross_entropy' x y calculates the cross entropy between x and y using base e.
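
A minimal sketch of cross_entropy', assuming Owl.Dense.Ndarray.D and that the result is the usual -sum x*log(y) with natural logarithm, as the base-e description above suggests; the alias N and the distributions are illustrative only.

  module N = Owl.Dense.Ndarray.D

  let () =
    let p = N.of_array [|0.; 1.; 0.|] [|3|] in      (* target distribution *)
    let q = N.of_array [|0.2; 0.7; 0.1|] [|3|] in   (* predicted distribution *)
    Printf.printf "H(p, q) = %g\n" (N.cross_entropy' p q)   (* -log 0.7, about 0.357 *)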

val clip_by_value : ?amin:'a -> ?amax:'a -> ('a, 'b) t -> ('a, 'b) t

clip_by_value ~amin ~amax x clips the elements in x based on amin and amax. The elements smaller than amin will be set to amin, and the elements greater than amax will be set to amax.
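
A minimal sketch of clip_by_value, assuming Owl.Dense.Ndarray.D; the alias N and the clipping bounds are illustrative only.

  module N = Owl.Dense.Ndarray.D

  let () =
    let x = N.sequential ~a:(-2.) [|5|] in               (* [|-2.; -1.; 0.; 1.; 2.|] *)
    let y = N.clip_by_value ~amin:(-1.) ~amax:1. x in    (* [|-1.; -1.; 0.; 1.; 1.|] *)
    N.print y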

val clip_by_l2norm : 'a -> ('a, 'b) t -> ('a, 'b) t

clip_by_l2norm t x clips x according to the L2-norm threshold set by t.

val fma : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

fma x y z calculates the `fused multiply add`, i.e. (x * y) + z.
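
A minimal sketch of fma, assuming Owl.Dense.Ndarray.D; the alias N and the constants are illustrative only.

  module N = Owl.Dense.Ndarray.D

  let () =
    let x = N.create [|3|] 2.
    and y = N.create [|3|] 3.
    and z = N.create [|3|] 1. in
    N.print (N.fma x y z)                    (* (2 * 3) + 1 = 7 in every position *)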

Tensor Calculus
val contract1 : (int * int) array -> ('a, 'b) t -> ('a, 'b) t

contract1 index_pairs x performs index contraction (a.k.a. tensor contraction) on x. index_pairs is an array of contracted indices.

Caveat: Not well tested yet, use with care! Also, consider using TTGT in the future for better performance.

val contract2 : (int * int) array -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

contract2 index_pairs x y performs index contraction (a.k.a. tensor contraction) on two ndarrays x and y. index_pairs is an array of contracted indices; the first element of each pair is the index of x, the second is that of y.

Caveat: Not well tested yet, use with care! Also, consider using TTGT in the future for better performance.

Cast functions
val cast : ('a, 'b) kind -> ('c, 'd) t -> ('a, 'b) t

cast kind x casts x of type ('c, 'd) t to type ('a, 'b) t specified by the passed-in kind parameter. This function is a generalisation of the other casting functions such as cast_s2d, cast_c2z, etc.
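
A minimal sketch of cast alongside a specialised cast, assuming the generic module is exposed as Owl.Dense.Ndarray.Generic and using the standard Bigarray kind values; the alias G and the values are illustrative only.

  module G = Owl.Dense.Ndarray.Generic

  let () =
    let x = G.sequential Bigarray.float64 [|3|] in   (* float64 ndarray: 0. 1. 2. *)
    let y = G.cast Bigarray.float32 x in             (* same values, float32 precision *)
    let z = G.cast_d2s x in                          (* the specialised cast, equivalent here *)
    G.print y;
    G.print z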

val cast_s2d : - (float, Stdlib.Bigarray.float32_elt) t -> - (float, Stdlib.Bigarray.float64_elt) t

cast_s2d x casts x from float32 to float64.

val cast_d2s : - (float, Stdlib.Bigarray.float64_elt) t -> - (float, Stdlib.Bigarray.float32_elt) t

cast_d2s x casts x from float64 to float32.

val cast_c2z : - (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt) t -> - (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt) t

cast_c2z x casts x from complex32 to complex64.

val cast_z2c : - (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt) t -> - (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt) t

cast_z2c x casts x from complex64 to complex32.

val cast_s2c : - (float, Stdlib.Bigarray.float32_elt) t -> - (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt) t

cast_s2c x casts x from float32 to complex32.

val cast_d2z : - (float, Stdlib.Bigarray.float64_elt) t -> - (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt) t

cast_d2z x casts x from float64 to complex64.

val cast_s2z : - (float, Stdlib.Bigarray.float32_elt) t -> - (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt) t

cast_s2z x casts x from float32 to complex64.

val cast_d2c : - (float, Stdlib.Bigarray.float64_elt) t -> - (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt) t

cast_d2c x casts x from float64 to complex32.

val conv1d : - ?padding:Owl_types.padding -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t

TODO

val conv2d : - ?padding:Owl_types.padding -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t

TODO

val conv3d : - ?padding:Owl_types.padding -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t

TODO

val dilated_conv1d : - ?padding:Owl_types.padding -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t

TODO

val dilated_conv2d : - ?padding:Owl_types.padding -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t

TODO

val dilated_conv3d : - ?padding:Owl_types.padding -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t

TODO

val transpose_conv1d : - ?padding:Owl_types.padding -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t

TODO

val transpose_conv2d : - ?padding:Owl_types.padding -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t

TODO

val transpose_conv3d : - ?padding:Owl_types.padding -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t

TODO

val max_pool1d : - ?padding:Owl_types.padding -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t

TODO

val max_pool2d : - ?padding:Owl_types.padding -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t

TODO

val max_pool3d : - ?padding:Owl_types.padding -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t

TODO

val avg_pool1d : - ?padding:Owl_types.padding -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t

TODO

val avg_pool2d : - ?padding:Owl_types.padding -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t

TODO

val avg_pool3d : - ?padding:Owl_types.padding -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t

TODO

val max_pool2d_argmax : - ?padding:Owl_types.padding -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t * (int64, Stdlib.Bigarray.int64_elt) t

TODO

val upsampling2d : ('a, 'b) t -> int array -> ('a, 'b) t

TODO

val conv1d_backward_input : - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t -> - ('a, 'b) t

TODO

val conv1d_backward_kernel : - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t -> - ('a, 'b) t

TODO

val conv2d_backward_input : - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t -> - ('a, 'b) t

TODO

val conv2d_backward_kernel : - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t -> - ('a, 'b) t

TODO

val conv3d_backward_input : - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t -> - ('a, 'b) t

TODO

val conv3d_backward_kernel : - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t -> - ('a, 'b) t

TODO

val dilated_conv1d_backward_input : - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t -> - ('a, 'b) t

TODO

val dilated_conv1d_backward_kernel : - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t -> - ('a, 'b) t

TODO

val dilated_conv2d_backward_input : - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t -> - ('a, 'b) t

TODO

val dilated_conv2d_backward_kernel : - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t -> - ('a, 'b) t

TODO

val dilated_conv3d_backward_input : - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t -> - ('a, 'b) t

TODO

val dilated_conv3d_backward_kernel : - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t -> - ('a, 'b) t

TODO

val transpose_conv1d_backward_input : - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t -> - ('a, 'b) t

TODO

val transpose_conv1d_backward_kernel : - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t -> - ('a, 'b) t

TODO

val transpose_conv2d_backward_input : - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t -> - ('a, 'b) t

TODO

val transpose_conv2d_backward_kernel : - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t -> - ('a, 'b) t

TODO

val transpose_conv3d_backward_input : - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t -> - ('a, 'b) t

TODO

val transpose_conv3d_backward_kernel : - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t -> - ('a, 'b) t

TODO

val max_pool1d_backward : - Owl_types.padding -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t -> - ('a, 'b) t

TODO

val max_pool2d_backward : - Owl_types.padding -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t -> - ('a, 'b) t

TODO

val max_pool3d_backward : - Owl_types.padding -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t -> - ('a, 'b) t

TODO

val avg_pool1d_backward : - Owl_types.padding -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t -> - ('a, 'b) t

TODO

val avg_pool2d_backward : - Owl_types.padding -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t -> - ('a, 'b) t

TODO

val avg_pool3d_backward : - Owl_types.padding -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t -> - ('a, 'b) t

TODO

val upsampling2d_backward : ('a, 'b) t -> int array -> ('a, 'b) t -> ('a, 'b) t

TODO

Helper functions

The following functions are helper functions for some other functions in both Ndarray and Ndview modules. In general, you are not supposed to use these functions directly.

val print_element : ('a, 'b) kind -> 'a -> unit

print_element kind a prints the value of a single element.

val print_index : int array -> unit

print_index i prints out the index of an element.

val _check_transpose_axis : int array -> int -> unit

_check_transpose_axis a d checks whether a is a legitimate transpose index.

val one_hot : int -> ('a, 'b) t -> ('a, 'b) t

one_hot idx depth creates one-hot vectors according to the indices ndarray and the specified depth. If idx is rank N, then the return is rank N+1. More specifically, if idx is of shape [|a;b;c|], the return is of shape [|a;b;c;depth|].
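
A minimal sketch of one_hot, assuming Owl.Dense.Ndarray.D and following the value signature above, in which the depth argument is given first; the alias N and the indices are illustrative only.

  module N = Owl.Dense.Ndarray.D

  let () =
    let idx = N.of_array [|0.; 2.; 1.|] [|3|] in
    let y = N.one_hot 4 idx in               (* depth 4, so the result has shape [|3; 4|] *)
    N.print y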

val sum_slices : ?axis:int -> ('a, 'b) t -> ('a, 'b) t

sum_slices ~axis:2 x sums the slices indexed by the first ~axis dimensions: for x of shape [|2;3;4;5|], it returns an ndarray of shape [|4;5|]. Currently, the operation is done using gemm; it is fast but consumes more memory.

val slide : - ?axis:int -> - ?ofs:int -> - ?step:int -> - window:int -> - ('a, 'b) t -> - ('a, 'b) t

slide ~axis ~window x generates a new ndarray by sliding a window along specified axis in x. E.g., if x has shape [|a;b;c|] and axis = 1, then [|a; number of windows; window; c|] is the shape of the returned ndarray.

Parameters: * axis is the axis for sliding, the default is -1, i.e. highest dimension. * ofs is the starting position of the sliding window. The default is 0. * step is the step size, the default is 1. * window is the size of the sliding window.
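
A minimal sketch of slide with the default offset and step, assuming Owl.Dense.Ndarray.D; the alias N and the window size are illustrative only.

  module N = Owl.Dense.Ndarray.D

  let () =
    let x = N.sequential [|6|] in            (* [|0.; 1.; 2.; 3.; 4.; 5.|] *)
    let y = N.slide ~window:3 x in           (* 4 windows of length 3 -> shape [|4; 3|] *)
    N.print y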

In-place modification
val create_ : out:('a, 'b) t -> 'a -> unit

TODO

val uniform_ : ?a:'a -> ?b:'a -> out:('a, 'b) t -> unit

TODO

val gaussian_ : ?mu:'a -> ?sigma:'a -> out:('a, 'b) t -> unit

TODO

val poisson_ : mu:float -> out:('a, 'b) t -> unit

TODO

val sequential_ : ?a:'a -> ?step:'a -> out:('a, 'b) t -> unit

TODO

val bernoulli_ : ?p:float -> out:('a, 'b) t -> unit

TODO

val zeros_ : out:('a, 'b) t -> unit

TODO

val ones_ : out:('a, 'b) t -> unit

TODO

val one_hot_ : out:('a, 'b) t -> int -> ('a, 'b) t -> unit

TODO

val sort_ : ('a, 'b) t -> unit

sort_ x performs in-place quicksort of the elements in x.

val get_fancy_ : out:('a, 'b) t -> Owl_types.index list -> ('a, 'b) t -> unit

TODO

val set_fancy_ : - out:('a, 'b) t -> - Owl_types.index list -> - ('a, 'b) t -> - ('a, 'b) t -> - unit

TODO

val get_slice_ : out:('a, 'b) t -> int list list -> ('a, 'b) t -> unit

TODO

val set_slice_ : - out:('a, 'b) t -> - int list list -> - ('a, 'b) t -> - ('a, 'b) t -> - unit

TODO

val copy_ : out:('a, 'b) t -> ('a, 'b) t -> unit

copy_ ~out src copies the data from ndarray src to destination out.

val reshape_ : out:('a, 'b) t -> ('a, 'b) t -> unit

TODO

val reverse_ : out:('a, 'b) t -> ('a, 'b) t -> unit

TODO

val transpose_ : out:('a, 'b) t -> ?axis:int array -> ('a, 'b) t -> unit

transpose_ ~out x is similar to transpose x but the output is written to out.

val repeat_ : out:('a, 'b) t -> ('a, 'b) t -> int array -> unit

repeat_ ~out x reps is similar to repeat x reps but the output is written to out.

val tile_ : out:('a, 'b) t -> ('a, 'b) t -> int array -> unit

tile_ ~out x reps is similar to tile x reps but the output is written to out.

val pad_ : out:('a, 'b) t -> ?v:'a -> int list list -> ('a, 'b) t -> unit

pad_ ~out ?v p x is similar to pad ?v p x but the output is written to out.

val sum_ : out:('a, 'b) t -> axis:int -> ('a, 'b) t -> unit

TODO

val min_ : out:('a, 'b) t -> axis:int -> ('a, 'b) t -> unit

TODO

val max_ : out:('a, 'b) t -> axis:int -> ('a, 'b) t -> unit

TODO

val add_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

add_ x y is similar to add function but the output is written to out. You need to make sure out is big enough to hold the output result.
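
A minimal sketch of add_ with and without ~out, assuming Owl.Dense.Ndarray.D and assuming (as in Owl's implementation) that omitting ~out writes the result into the first operand; the alias N and the shapes are illustrative only.

  module N = Owl.Dense.Ndarray.D

  let () =
    let x = N.ones [|2; 2|] and y = N.ones [|2; 2|] in
    let out = N.zeros [|2; 2|] in
    N.add_ ~out x y;                         (* result written to out; x and y untouched *)
    N.add_ x y;                              (* no ~out: x itself is overwritten with x + y *)
    N.print out;
    N.print x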

val sub_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

sub_ x y is similar to sub function but the output is written to out. You need to make sure out is big enough to hold the output result.

val mul_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

mul_ x y is similar to mul function but the output is written to out. You need to make sure out is big enough to hold the output result.

val div_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

div_ x y is similar to div function but the output is written to out. You need to make sure out is big enough to hold the output result.

val pow_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

pow_ x y is similar to pow function but the output is written to out. You need to make sure out is big enough to hold the output result.

val atan2_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

atan2_ x y is similar to atan2 function but the output is written to out. You need to make sure out is big enough to hold the output result.

val hypot_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

hypot_ x y is similar to hypot function but the output is written to out. You need to make sure out is big enough to hold the output result.

val fmod_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

fmod_ x y is similar to fmod function but the output is written to out. You need to make sure out is big enough to hold the output result.

val min2_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

min2_ x y is similar to min2 function but the output is written to out. You need to make sure out is big enough to hold the output result.

val max2_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

max2_ x y is similar to max2 function but the output is written to out. You need to make sure out is big enough to hold the output result.

val add_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit

add_scalar_ x y is similar to add_scalar function but the output is written to x.

val sub_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit

sub_scalar_ x y is similar to sub_scalar function but the output is written to x.

val mul_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit

mul_scalar_ x y is similar to mul_scalar function but the output is written to x.

val div_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit

div_scalar_ x y is similar to div_scalar function but the output is written to x.

val pow_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit

pow_scalar_ x y is similar to pow_scalar function but the output is written to x.

val atan2_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit

atan2_scalar_ x y is similar to atan2_scalar function but the output is written to x.

val fmod_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit

fmod_scalar_ x y is similar to fmod_scalar function but the output is written to x.

val scalar_add_ : ?out:('a, 'b) t -> 'a -> ('a, 'b) t -> unit

scalar_add_ a x is similar to scalar_add function but the output is written to x.

val scalar_sub_ : ?out:('a, 'b) t -> 'a -> ('a, 'b) t -> unit

scalar_sub_ a x is similar to scalar_sub function but the output is written to x.

val scalar_mul_ : ?out:('a, 'b) t -> 'a -> ('a, 'b) t -> unit

scalar_mul_ a x is similar to scalar_mul function but the output is written to x.

val scalar_div_ : ?out:('a, 'b) t -> 'a -> ('a, 'b) t -> unit

scalar_div_ a x is similar to scalar_div function but the output is written to x.

val scalar_pow_ : ?out:('a, 'b) t -> 'a -> ('a, 'b) t -> unit

scalar_pow_ a x is similar to scalar_pow function but the output is written to x.

val scalar_atan2_ : ?out:('a, 'b) t -> 'a -> ('a, 'b) t -> unit

scalar_atan2_ a x is similar to scalar_atan2 function but the output is written to x.

val scalar_fmod_ : ?out:('a, 'b) t -> 'a -> ('a, 'b) t -> unit

scalar_fmod_ a x is similar to scalar_fmod function but the output is written to x.

val clip_by_value_ : - ?out:('a, 'b) t -> - ?amin:'a -> - ?amax:'a -> - ('a, 'b) t -> - unit

TODO

val clip_by_l2norm_ : ?out:('a, 'b) t -> 'a -> ('a, 'b) t -> unit

TODO

val fma_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

fma_ ~out x y z is similar to fma x y z function but the output is written to out.

val dot_ : - ?transa:bool -> - ?transb:bool -> - ?alpha:'a -> - ?beta:'a -> - c:('a, 'b) t -> - ('a, 'b) t -> - ('a, 'b) t -> - unit

Refer to :doc:`owl_dense_matrix_generic`

val conj_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

conj_ x is similar to conj but output is written to x

val abs_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

abs_ x is similar to abs but output is written to x

val neg_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

neg_ x is similar to neg but output is written to x

val reci_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

reci_ x is similar to reci but output is written to x

val signum_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

signum_ x is similar to signum but output is written to x

val sqr_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

sqr_ x is similar to sqr but output is written to x

val sqrt_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

sqrt_ x is similar to sqrt but output is written to x

val cbrt_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

cbrt_ x is similar to cbrt but output is written to x

val exp_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

exp_ x is similar to exp but output is written to x

val exp2_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

exp2_ x is similar to exp2 but output is written to x

val exp10_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

exp10_ x is similar to exp10 but output is written to x

val expm1_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

expm1_ x is similar to expm1 but output is written to x

val log_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

log_ x is similar to log but output is written to x

val log2_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

log2_ x is similar to log2 but output is written to x

val log10_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

log10_ x is similar to log10 but output is written to x

val log1p_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

log1p_ x is similar to log1p but output is written to x

val sin_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

sin_ x is similar to sin but output is written to x

val cos_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

cos_ x is similar to cos but output is written to x

val tan_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

tan_ x is similar to tan but output is written to x

val asin_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

asin_ x is similar to asin but output is written to x

val acos_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

acos_ x is similar to acos but output is written to x

val atan_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

atan_ x is similar to atan but output is written to x

val sinh_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

sinh_ x is similar to sinh but output is written to x

val cosh_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

cosh_ x is similar to cosh but output is written to x

val tanh_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

tanh_ x is similar to tanh but output is written to x

val asinh_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

asinh_ x is similar to asinh but output is written to x

val acosh_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

acosh_ x is similar to acosh but output is written to x

val atanh_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

atanh_ x is similar to atanh but output is written to x

val floor_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

floor_ x is similar to floor but output is written to x

val ceil_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

ceil_ x is similar to ceil but output is written to x

val round_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

round_ x is similar to round but output is written to x

val trunc_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

trunc_ x is similar to trunc but output is written to x

val fix_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

fix_ x is similar to fix but output is written to x

val erf_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

erf_ x is similar to erf but output is written to x

val erfc_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

erfc_ x is similar to erfc but output is written to x

val relu_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

relu_ x is similar to relu but output is written to x

val softplus_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

softplus_ x is similar to softplus but output is written to x

val softsign_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

softsign_ x is similar to softsign but output is written to x

val sigmoid_ : ?out:('a, 'b) t -> ('a, 'b) t -> unit

sigmoid_ x is similar to sigmoid but output is written to x

val softmax_ : ?out:('a, 'b) t -> ?axis:int -> ('a, 'b) t -> unit

softmax_ x is similar to softmax but output is written to x

val cumsum_ : ?out:('a, 'b) t -> ?axis:int -> ('a, 'b) t -> unit

cumsum_ x is similar to cumsum but output is written to x

val cumprod_ : ?out:('a, 'b) t -> ?axis:int -> ('a, 'b) t -> unit

cumprod_ x is similar to cumprod but output is written to x

val cummin_ : ?out:('a, 'b) t -> ?axis:int -> ('a, 'b) t -> unit

cummin_ x is similar to cummin but output is written to x

val cummax_ : ?out:('a, 'b) t -> ?axis:int -> ('a, 'b) t -> unit

cummax_ x is similar to cummax but output is written to x

val dropout_ : ?out:('a, 'b) t -> ?rate:float -> ('a, 'b) t -> unit

dropout_ x is similar to dropout but output is written to x

val elt_equal_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

elt_equal_ x y is similar to elt_equal function but the output is written to out. You need to make sure out is big enough to hold the output result.

val elt_not_equal_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

elt_not_equal_ x y is similar to elt_not_equal function but the output is written to out. You need to make sure out is big enough to hold the output result.

val elt_less_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

elt_less_ x y is similar to elt_less function but the output is written to out. You need to make sure out is big enough to hold the output result.

val elt_greater_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

elt_greater_ x y is similar to elt_greater function but the output is written to out. You need to make sure out is big enough to hold the output result.

val elt_less_equal_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

elt_less_equal_ x y is similar to elt_less_equal function but the output is written to out. You need to make sure out is big enough to hold the output result.

val elt_greater_equal_ : ?out:('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> unit

elt_greater_equal_ x y is similar to elt_greater_equal function but the output is written to out. You need to make sure out is big enough to hold the output result.

val elt_equal_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit

elt_equal_scalar_ x a is similar to elt_equal_scalar function but the output is written to x.

val elt_not_equal_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit

elt_not_equal_scalar_ x a is similar to elt_not_equal_scalar function but the output is written to x.

val elt_less_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit

elt_less_scalar_ x a is similar to elt_less_scalar function but the output is written to x.

val elt_greater_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit

elt_greater_scalar_ x a is similar to elt_greater_scalar function but the output is written to x.

val elt_less_equal_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit

elt_less_equal_scalar_ x a is similar to elt_less_equal_scalar function but the output is written to x.

val elt_greater_equal_scalar_ : ?out:('a, 'b) t -> ('a, 'b) t -> 'a -> unit

elt_greater_equal_scalar_ x a is similar to elt_greater_equal_scalar function but the output is written to x.

val conv1d_ : - out:('a, 'b) t -> - ?padding:Owl_types.padding -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - unit

TODO

val conv2d_ : - out:('a, 'b) t -> - ?padding:Owl_types.padding -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - unit

TODO

val conv3d_ : - out:('a, 'b) t -> - ?padding:Owl_types.padding -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - unit

TODO

val dilated_conv1d_ : - out:('a, 'b) t -> - ?padding:Owl_types.padding -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - int array -> - unit

TODO

val dilated_conv2d_ : - out:('a, 'b) t -> - ?padding:Owl_types.padding -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - int array -> - unit

TODO

val dilated_conv3d_ : - out:('a, 'b) t -> - ?padding:Owl_types.padding -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - int array -> - unit

TODO

val transpose_conv1d_ : - out:('a, 'b) t -> - ?padding:Owl_types.padding -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - unit

TODO

val transpose_conv2d_ : - out:('a, 'b) t -> - ?padding:Owl_types.padding -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - unit

TODO

val transpose_conv3d_ : - out:('a, 'b) t -> - ?padding:Owl_types.padding -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - unit

TODO

val max_pool1d_ : - out:('a, 'b) t -> - ?padding:Owl_types.padding -> - ('a, 'b) t -> - int array -> - int array -> - unit

TODO

val max_pool2d_ : - out:('a, 'b) t -> - ?padding:Owl_types.padding -> - ('a, 'b) t -> - int array -> - int array -> - unit

TODO

val max_pool3d_ : - out:('a, 'b) t -> - ?padding:Owl_types.padding -> - ('a, 'b) t -> - int array -> - int array -> - unit

TODO

val avg_pool1d_ : - out:('a, 'b) t -> - ?padding:Owl_types.padding -> - ('a, 'b) t -> - int array -> - int array -> - unit

TODO

val avg_pool2d_ : - out:('a, 'b) t -> - ?padding:Owl_types.padding -> - ('a, 'b) t -> - int array -> - int array -> - unit

TODO

val avg_pool3d_ : - out:('a, 'b) t -> - ?padding:Owl_types.padding -> - ('a, 'b) t -> - int array -> - int array -> - unit

TODO

val upsampling2d_ : out:('a, 'b) t -> ('a, 'b) t -> int array -> unit

TODO

val conv1d_backward_input_ : - out:('a, 'b) t -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t -> - unit

TODO

val conv1d_backward_kernel_ : - out:('a, 'b) t -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t -> - unit

TODO

val conv2d_backward_input_ : - out:('a, 'b) t -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t -> - unit

TODO

val conv2d_backward_kernel_ : - out:('a, 'b) t -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t -> - unit

TODO

val conv3d_backward_input_ : - out:('a, 'b) t -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t -> - unit

TODO

val conv3d_backward_kernel_ : - out:('a, 'b) t -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t -> - unit

TODO

val dilated_conv1d_backward_input_ : - out:('a, 'b) t -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t -> - unit

TODO

val dilated_conv1d_backward_kernel_ : - out:('a, 'b) t -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t -> - unit

TODO

val dilated_conv2d_backward_input_ : - out:('a, 'b) t -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t -> - unit

TODO

val dilated_conv2d_backward_kernel_ : - out:('a, 'b) t -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t -> - unit

TODO

val dilated_conv3d_backward_input_ : - out:('a, 'b) t -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t -> - unit

TODO

val dilated_conv3d_backward_kernel_ : - out:('a, 'b) t -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t -> - unit

TODO

val transpose_conv1d_backward_input_ : - out:('a, 'b) t -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t -> - unit

TODO

val transpose_conv1d_backward_kernel_ : - out:('a, 'b) t -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t -> - unit

TODO

val transpose_conv2d_backward_input_ : - out:('a, 'b) t -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t -> - unit

TODO

val transpose_conv2d_backward_kernel_ : - out:('a, 'b) t -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t -> - unit

TODO

val transpose_conv3d_backward_input_ : - out:('a, 'b) t -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t -> - unit

TODO

val transpose_conv3d_backward_kernel_ : - out:('a, 'b) t -> - ('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t -> - unit

TODO

val max_pool1d_backward_ : - out:('a, 'b) t -> - Owl_types.padding -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t -> - unit

TODO

val max_pool2d_backward_ : - out:('a, 'b) t -> - Owl_types.padding -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t -> - unit

TODO

val max_pool3d_backward_ : - out:('a, 'b) t -> - Owl_types.padding -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t -> - unit

TODO

val avg_pool1d_backward_ : - out:('a, 'b) t -> - Owl_types.padding -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t -> - unit

TODO

val avg_pool2d_backward_ : - out:('a, 'b) t -> - Owl_types.padding -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t -> - unit

TODO

val avg_pool3d_backward_ : - out:('a, 'b) t -> - Owl_types.padding -> - ('a, 'b) t -> - int array -> - int array -> - ('a, 'b) t -> - unit

TODO

val upsampling2d_backward_ : - out:('a, 'b) t -> - ('a, 'b) t -> - int array -> - ('a, 'b) t -> - unit

TODO

val fused_adagrad_ : ?out:('a, 'b) t -> rate:'a -> eps:'a -> ('a, 'b) t -> unit

TODO

Matrix functions
type area = Owl_dense_ndarray_generic.area = {
  a : int;
  b : int;
  c : int;
  d : int;
}

Refer to :doc:`owl_dense_matrix_generic`

val area : int -> int -> int -> int -> area

Refer to :doc:`owl_dense_matrix_generic`

val copy_area_to : ('a, 'b) t -> area -> ('a, 'b) t -> area -> unit

Refer to :doc:`owl_dense_matrix_generic`

val row_num : ('a, 'b) t -> int

Refer to :doc:`owl_dense_matrix_generic`

val col_num : ('a, 'b) t -> int

Refer to :doc:`owl_dense_matrix_generic`

val row : ('a, 'b) t -> int -> ('a, 'b) t

Refer to :doc:`owl_dense_matrix_generic`

val col : ('a, 'b) t -> int -> ('a, 'b) t

Refer to :doc:`owl_dense_matrix_generic`

val rows : ('a, 'b) t -> int array -> ('a, 'b) t

Refer to :doc:`owl_dense_matrix_generic`

val cols : ('a, 'b) t -> int array -> ('a, 'b) t

Refer to :doc:`owl_dense_matrix_generic`

val copy_row_to : ('a, 'b) t -> ('a, 'b) t -> int -> unit

Refer to :doc:`owl_dense_matrix_generic`

val copy_col_to : ('a, 'b) t -> ('a, 'b) t -> int -> unit

Refer to :doc:`owl_dense_matrix_generic`

val dot : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_matrix_generic`

val diag : ?k:int -> ('a, 'b) t -> ('a, 'b) t

Refer to :doc:`owl_dense_matrix_generic`

val trace : ('a, 'b) t -> 'a

Refer to :doc:`owl_dense_matrix_generic`

val to_rows : ('a, 'b) t -> ('a, 'b) t array

Refer to :doc:`owl_dense_matrix_generic`

val of_rows : ('a, 'b) t array -> ('a, 'b) t

Refer to :doc:`owl_dense_matrix_generic`

val to_cols : ('a, 'b) t -> ('a, 'b) t array

Refer to :doc:`owl_dense_matrix_generic`

val of_cols : ('a, 'b) t array -> ('a, 'b) t

Refer to :doc:`owl_dense_matrix_generic`

val to_arrays : ('a, 'b) t -> 'a array array

Refer to :doc:`owl_dense_matrix_generic`

val of_arrays : ('a, 'b) kind -> 'a array array -> ('a, 'b) t

Refer to :doc:`owl_dense_matrix_generic`

val draw_rows : - ?replacement:bool -> - ('a, 'b) t -> - int -> - ('a, 'b) t * int array

Refer to :doc:`owl_dense_matrix_generic`

val draw_cols : - ?replacement:bool -> - ('a, 'b) t -> - int -> - ('a, 'b) t * int array

Refer to :doc:`owl_dense_matrix_generic`

val draw_rows2 : - ?replacement:bool -> - ('a, 'b) t -> - ('a, 'b) t -> - int -> - ('a, 'b) t * ('a, 'b) t * int array

Refer to :doc:`owl_dense_matrix_generic`

val draw_cols2 : - ?replacement:bool -> - ('a, 'b) t -> - ('a, 'b) t -> - int -> - ('a, 'b) t * ('a, 'b) t * int array

Refer to :doc:`owl_dense_matrix_generic`

Helper functions
val float_to_elt : 'a -> 'a

Identity function to deal with the type conversion required by other functors.

val elt_to_float : 'a -> 'a

Identity function to deal with the type conversion required by other functors.

include module type of struct include Operator end
include sig ... end
val (+$) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (-$) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (*$) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (/$) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t
val ($+) : - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t
val ($-) : - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t
val ($*) : - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t
val ($/) : - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (=) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - bool
val (!=) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - bool
val (<>) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - bool
val (>) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - bool
val (<) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - bool
val (>=) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - bool
val (<=) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - bool
include sig ... end
val (=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (!=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (<>$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (<$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (>$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (<=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (>=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (=.$) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (!=.$) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (<>.$) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (<.$) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (>.$) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (<=.$) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (>=.$) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (=~) : - ?eps:float -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - bool
val (=~$) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (=~.) : - ?eps:float -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (=~.$) : - ?eps:float -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (%) : - (float, 'a) Owl_dense_ndarray_generic.t -> - (float, 'a) Owl_dense_ndarray_generic.t -> - (float, 'a) Owl_dense_ndarray_generic.t
val (%$) : - (float, 'a) Owl_dense_ndarray_generic.t -> - float -> - (float, 'a) Owl_dense_ndarray_generic.t
val (**) : - (float, 'a) Owl_dense_ndarray_generic.t -> - (float, 'a) Owl_dense_ndarray_generic.t -> - (float, 'a) Owl_dense_ndarray_generic.t
val ($**) : - float -> - (float, 'a) Owl_dense_ndarray_generic.t -> - (float, 'a) Owl_dense_ndarray_generic.t
val (**$) : - (float, 'a) Owl_dense_ndarray_generic.t -> - float -> - (float, 'a) Owl_dense_ndarray_generic.t
val (+=) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - unit
val (-=) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - unit
val (*=) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - unit
val (/=) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - unit
val (+$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (-$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (*$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (/$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (.!{;..}) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - Owl_types.index array -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (.!{;..}<-) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - Owl_types.index array -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - unit
val (.${}) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - int list -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (.${;..}) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - int list array -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (.${}<-) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - int list -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - unit
val (.${;..}<-) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - int list array -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - unit
include sig ... end
val (.%{}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int -> 'a
val (.%{;..}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int array -> 'a
val (.%{}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int -> 'a -> unit
val (.%{;..}<-) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - int array -> - 'a -> - unit
val mpow : - ('a, 'b) Owl_linalg_generic.t -> - float -> - ('a, 'b) Owl_linalg_generic.t
\ No newline at end of file diff --git a/owl/Owl_dense_ndarray/Operator/index.html b/owl/Owl_dense_ndarray/Operator/index.html deleted file mode 100644 index b5a184463..000000000 --- a/owl/Owl_dense_ndarray/Operator/index.html +++ /dev/null @@ -1,171 +0,0 @@ - -Operator (owl.Owl_dense_ndarray.Operator)

Module Owl_dense_ndarray.Operator

include sig ... end
val (+$) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (-$) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (*$) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (/$) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t
val ($+) : - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t
val ($-) : - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t
val ($*) : - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t
val ($/) : - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (=) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - bool
val (!=) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - bool
val (<>) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - bool
val (>) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - bool
val (<) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - bool
val (>=) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - bool
val (<=) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - bool
include sig ... end
val (=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (!=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (<>$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (<$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (>$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (<=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (>=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (=.$) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (!=.$) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (<>.$) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (<.$) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (>.$) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (<=.$) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (>=.$) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (=~) : - ?eps:float -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - bool
val (=~$) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (=~.) : - ?eps:float -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (=~.$) : - ?eps:float -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - 'a -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (%) : - (float, 'a) Owl_dense_ndarray_generic.t -> - (float, 'a) Owl_dense_ndarray_generic.t -> - (float, 'a) Owl_dense_ndarray_generic.t
val (%$) : - (float, 'a) Owl_dense_ndarray_generic.t -> - float -> - (float, 'a) Owl_dense_ndarray_generic.t
val (**) : - (float, 'a) Owl_dense_ndarray_generic.t -> - (float, 'a) Owl_dense_ndarray_generic.t -> - (float, 'a) Owl_dense_ndarray_generic.t
val ($**) : - float -> - (float, 'a) Owl_dense_ndarray_generic.t -> - (float, 'a) Owl_dense_ndarray_generic.t
val (**$) : - (float, 'a) Owl_dense_ndarray_generic.t -> - float -> - (float, 'a) Owl_dense_ndarray_generic.t
val (+=) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - unit
val (-=) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - unit
val (*=) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - unit
val (/=) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - unit
val (+$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (-$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (*$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (/$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (.!{;..}) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - Owl_types.index array -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (.!{;..}<-) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - Owl_types.index array -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - unit
val (.${}) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - int list -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (.${;..}) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - int list array -> - ('a, 'b) Owl_dense_ndarray_generic.t
val (.${}<-) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - int list -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - unit
val (.${;..}<-) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - int list array -> - ('a, 'b) Owl_dense_ndarray_generic.t -> - unit
include sig ... end
val (.%{}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int -> 'a
val (.%{;..}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int array -> 'a
val (.%{}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int -> 'a -> unit
val (.%{;..}<-) : - ('a, 'b) Owl_dense_ndarray_generic.t -> - int array -> - 'a -> - unit
\ No newline at end of file diff --git a/owl/Owl_dense_ndarray/S/index.html b/owl/Owl_dense_ndarray/S/index.html deleted file mode 100644 index d01a50c69..000000000 --- a/owl/Owl_dense_ndarray/S/index.html +++ /dev/null @@ -1,579 +0,0 @@ - -S (owl.Owl_dense_ndarray.S)

Module Owl_dense_ndarray.S

include module type of struct include Owl_dense_ndarray_s end
type elt = float
type arr = - (float, Stdlib.Bigarray.float32_elt, Stdlib.Bigarray.c_layout) - Stdlib.Bigarray.Genarray.t
include Owl_dense_ndarray_intf.Common with type elt := elt and type arr := arr
include Owl_base_dense_ndarray_intf.Common - with type elt := elt - with type arr := arr
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:float -> int array -> arr
val shape : arr -> int array
val numel : arr -> int
val strides : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val slice_size : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val flatten : arr -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val squeeze : ?axis:int array -> arr -> arr
val expand : ?hi:bool -> arr -> int -> arr
val split : ?axis:int -> int array -> arr -> arr array
val draw : ?axis:int -> arr -> int -> arr * int array
val pad : ?v:elt -> int list list -> arr -> arr
val one_hot : int -> arr -> arr
val print : - ?max_row:int -> - ?max_col:int -> - ?header:bool -> - ?fmt:(elt -> string) -> - arr -> - unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
Iterate array elements
val iteri : (int -> elt -> unit) -> arr -> unit
val iter : (elt -> unit) -> arr -> unit
val mapi : (int -> elt -> elt) -> arr -> arr
val map : (elt -> elt) -> arr -> arr
val filteri : (int -> elt -> bool) -> arr -> int array
val filter : (elt -> bool) -> arr -> int array
val foldi : ?axis:int -> (int -> elt -> elt -> elt) -> elt -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scani : ?axis:int -> (int -> elt -> elt -> elt) -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
Examination & Comparison
val exists : (elt -> bool) -> arr -> bool
val not_exists : (elt -> bool) -> arr -> bool
val for_all : (elt -> bool) -> arr -> bool
val is_zero : arr -> bool
val is_positive : arr -> bool
val is_negative : arr -> bool
val is_nonpositive : arr -> bool
val is_nonnegative : arr -> bool
val is_normal : arr -> bool
val not_nan : arr -> bool
val not_inf : arr -> bool
val equal : arr -> arr -> bool
val not_equal : arr -> arr -> bool
val greater : arr -> arr -> bool
val less : arr -> arr -> bool
val greater_equal : arr -> arr -> bool
val less_equal : arr -> arr -> bool
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val equal_scalar : arr -> elt -> bool
val not_equal_scalar : arr -> elt -> bool
val less_scalar : arr -> elt -> bool
val greater_scalar : arr -> elt -> bool
val less_equal_scalar : arr -> elt -> bool
val greater_equal_scalar : arr -> elt -> bool
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val diag : ?k:int -> arr -> arr
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
Create N-dimensional array
val linspace : elt -> elt -> int -> arr

linspace 0. 9. 10 ...

val logspace : ?base:float -> elt -> elt -> int -> arr

logspace 0. 9. 10 ...

val unit_basis : int -> int -> arr

unit_basis n i returns a unit basis vector with the ith element set to 1.

Obtain basic properties
val num_dims : arr -> int
val nth_dim : arr -> int -> int
val nnz : arr -> int
val density : arr -> float
val size_in_bytes : arr -> int
val same_shape : arr -> arr -> bool
val same_data : arr -> arr -> bool
val ind : arr -> int -> int array
val i1d : arr -> int array -> int
Manipulate a N-dimensional array
val get_index : arr -> int array array -> elt array
val set_index : arr -> int array array -> elt array -> unit
val get_fancy : Owl_types.index list -> arr -> arr
val set_fancy : Owl_types.index list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val sub_ndarray : int array -> arr -> arr array
val slice_left : arr -> int array -> arr
val fill : arr -> elt -> unit
val resize : ?head:bool -> arr -> int array -> arr
val flip : ?axis:int -> arr -> arr
val rotate : arr -> int -> arr
val swap : int -> int -> arr -> arr
val concat_vertical : arr -> arr -> arr
val concat_horizontal : arr -> arr -> arr
val concat_vh : arr array array -> arr
val split_vh : (int * int) array array -> arr -> arr array array
val dropout : ?rate:float -> arr -> arr
val top : arr -> int -> int array array
val bottom : arr -> int -> int array array
val sort : arr -> arr
val sort1 : ?axis:int -> arr -> arr
val argsort : - arr -> - (int64, Stdlib.Bigarray.int64_elt, Stdlib.Bigarray.c_layout) - Stdlib.Bigarray.Genarray.t
val mmap : Unix.file_descr -> ?pos:int64 -> bool -> int array -> arr
Iterate array elements
val iter2i : (int -> elt -> elt -> unit) -> arr -> arr -> unit
val iter2 : (elt -> elt -> unit) -> arr -> arr -> unit
val map2i : (int -> elt -> elt -> elt) -> arr -> arr -> arr
val map2 : (elt -> elt -> elt) -> arr -> arr -> arr
val iteri_nd : (int array -> elt -> unit) -> arr -> unit
val mapi_nd : (int array -> elt -> elt) -> arr -> arr
val foldi_nd : - ?axis:int -> - (int array -> elt -> elt -> elt) -> - elt -> - arr -> - arr
val scani_nd : ?axis:int -> (int array -> elt -> elt -> elt) -> arr -> arr
val filteri_nd : (int array -> elt -> bool) -> arr -> int array array
val iter2i_nd : (int array -> elt -> elt -> unit) -> arr -> arr -> unit
val map2i_nd : (int array -> elt -> elt -> elt) -> arr -> arr -> arr
val iteri_slice : ?axis:int -> (int -> arr -> unit) -> arr -> unit
val iter_slice : ?axis:int -> (arr -> unit) -> arr -> unit
val mapi_slice : ?axis:int -> (int -> arr -> 'c) -> arr -> 'c array
val map_slice : ?axis:int -> (arr -> 'c) -> arr -> 'c array
val filteri_slice : ?axis:int -> (int -> arr -> bool) -> arr -> arr array
val filter_slice : ?axis:int -> (arr -> bool) -> arr -> arr array
val foldi_slice : ?axis:int -> (int -> 'c -> arr -> 'c) -> 'c -> arr -> 'c
val fold_slice : ?axis:int -> ('c -> arr -> 'c) -> 'c -> arr -> 'c
Examine array elements or compare two arrays
Input/Output functions
val to_array : arr -> elt array
val save : out:string -> arr -> unit
val load : string -> arr
val save_npy : out:string -> arr -> unit
val load_npy : string -> arr
Unary mathematical operations
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod' : arr -> elt
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean' : arr -> elt
val median' : arr -> elt
val median : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var' : arr -> elt
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std' : arr -> elt
val sem : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sem' : arr -> elt
val minmax : ?axis:int -> ?keep_dims:bool -> arr -> arr * arr
val minmax' : arr -> elt * elt
val min_i : arr -> elt * int array
val max_i : arr -> elt * int array
val minmax_i : arr -> (elt * int array) * (elt * int array)
val abs2 : arr -> arr
val conj : arr -> arr
val reci : arr -> arr
val reci_tol : ?tol:elt -> arr -> arr
val cbrt : arr -> arr
val exp2 : arr -> arr
val exp10 : arr -> arr
val expm1 : arr -> arr
val log1p : arr -> arr
val trunc : arr -> arr
val fix : arr -> arr
val modf : arr -> arr * arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm_sqr : ?axis:int -> ?keep_dims:bool -> arr -> arr
val vecnorm : ?axis:int -> ?p:float -> ?keep_dims:bool -> arr -> arr
val vecnorm' : ?p:float -> arr -> elt
val cumsum : ?axis:int -> arr -> arr
val cumprod : ?axis:int -> arr -> arr
val cummin : ?axis:int -> arr -> arr
val cummax : ?axis:int -> arr -> arr
val diff : ?axis:int -> ?n:int -> arr -> arr
val lgamma : arr -> arr
Binary mathematical operations
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val ssqr' : arr -> elt -> elt
val ssqr_diff' : arr -> arr -> elt
Tensor Calculus
val contract1 : (int * int) array -> arr -> arr
val contract2 : (int * int) array -> arr -> arr -> arr
Experimental functions
val slide : ?axis:int -> ?ofs:int -> ?step:int -> window:int -> arr -> arr
Functions of in-place modification
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:float -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val sort_ : arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_fancy_ : out:arr -> Owl_types.index list -> arr -> unit
val set_fancy_ : out:arr -> Owl_types.index list -> arr -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
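The in-place variants above either take a required ~out buffer (when the output shape is fixed by the operation) or an optional ?out. A minimal sketch of the convention, again assuming Owl_dense_ndarray.S; the claim that ?out defaults to the first operand is an assumption, not something stated on this page.

module N = Owl_dense_ndarray.S

let () =
  let x = N.ones [|4|] in
  let y = N.sequential [|4|] in
  (* write x + y into a preallocated buffer *)
  let out = N.empty [|4|] in
  N.add_ ~out x y;
  (* with ?out omitted, the result is (assumed to be) written back into x *)
  N.add_ x y;
  N.print out;
  N.print x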

Matrix functions

val col : arr -> int -> arr
val cols : arr -> int array -> arr
val to_arrays : arr -> elt array array
val draw_rows : ?replacement:bool -> arr -> int -> arr * int array
val draw_cols : ?replacement:bool -> arr -> int -> arr * int array
val draw_rows2 : ?replacement:bool -> arr -> arr -> int -> arr * arr * int array
val draw_cols2 : ?replacement:bool -> arr -> arr -> int -> arr * arr * int array
include Owl_dense_ndarray_intf.Real with type elt := elt and type arr := arr
include Owl_base_dense_ndarray_intf.Real with type elt := elt with type arr := arr
val sum_slices : ?axis:int -> arr -> arr
val signum : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val atan2 : arr -> arr -> arr
val approx_equal : ?eps:float -> arr -> arr -> bool
val approx_equal_scalar : ?eps:float -> arr -> float -> bool
val approx_elt_equal : ?eps:float -> arr -> arr -> arr
val approx_elt_equal_scalar : ?eps:float -> arr -> float -> arr
val dot : arr -> arr -> arr
val trace : arr -> elt
Helper functions
val float_to_elt : float -> elt
val elt_to_float : elt -> float
Real operations
val i0 : arr -> arr
val i0e : arr -> arr
val i1 : arr -> arr
val i1e : arr -> arr
val iv : v:arr -> arr -> arr
val scalar_iv : v:elt -> arr -> arr
val iv_scalar : v:arr -> elt -> arr
val j0 : arr -> arr
val j1 : arr -> arr
val jv : v:arr -> arr -> arr
val scalar_jv : v:elt -> arr -> arr
val jv_scalar : v:arr -> elt -> arr
val erf : arr -> arr
val erfc : arr -> arr
val logistic : arr -> arr
val elu : ?alpha:elt -> arr -> arr
val leaky_relu : ?alpha:elt -> arr -> arr
val softplus : arr -> arr
val softsign : arr -> arr
val softmax : ?axis:int -> arr -> arr
val sigmoid : arr -> arr
val log_sum_exp' : arr -> float
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val fmod_scalar : arr -> elt -> arr
val scalar_fmod : elt -> arr -> arr
val cross_entropy' : arr -> arr -> float
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
val poisson : mu:elt -> int array -> arr
val poisson_ : mu:elt -> out:arr -> unit
include Owl_dense_ndarray_intf.NN with type arr := arr
include Owl_base_dense_ndarray_intf.NN with type arr := arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val max_pool2d_argmax : ?padding:Owl_types.padding -> arr -> int array -> int array -> arr * (int64, Stdlib.Bigarray.int64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
val conv1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
include module type of struct include Operator end
include sig ... end
val (+$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (-$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (*$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (/$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($+) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($-) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($*) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($/) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val (=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (!=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (<>) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (>) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (<) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (>=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (<=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
include sig ... end
val (=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (!=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (<>$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (<$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (>$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (<=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (>=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (!=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (<>.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (<.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (>.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (<=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (>=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (=~) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (=~$) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (=~.) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val (=~.$) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (%) : (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t
val (%$) : (float, 'a) Owl_dense_ndarray_generic.t -> float -> (float, 'a) Owl_dense_ndarray_generic.t
val (**) : (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t
val ($**) : float -> (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t
val (**$) : (float, 'a) Owl_dense_ndarray_generic.t -> float -> (float, 'a) Owl_dense_ndarray_generic.t
val (+=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (-=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (*=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (/=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (+$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (-$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (*$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (/$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (.!{;..}) : ('a, 'b) Owl_dense_ndarray_generic.t -> Owl_types.index array -> ('a, 'b) Owl_dense_ndarray_generic.t
val (.!{;..}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> Owl_types.index array -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (.${}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list -> ('a, 'b) Owl_dense_ndarray_generic.t
val (.${;..}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list array -> ('a, 'b) Owl_dense_ndarray_generic.t
val (.${}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (.${;..}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list array -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
include sig ... end
val (.%{}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int -> 'a
val (.%{;..}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int array -> 'a
val (.%{}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int -> 'a -> unit
val (.%{;..}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int array -> 'a -> unit
val mpow : Owl_linalg_s.mat -> float -> Owl_linalg_s.mat
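To show how the Operator include above is typically used, here is a hedged sketch (again assuming this page is Owl_dense_ndarray.S); the infix and indexing operators come from the signatures listed above.

module N = Owl_dense_ndarray.S
open N   (* brings +$, *$, =~ and the .%{} indexing operators into scope *)

let () =
  let x = sequential [|6|] in
  let y = (x +$ 1.) *$ 2. in   (* scalar add, then scalar multiply *)
  y.%{3} <- 100.;              (* index into the 1-d array via (.%{}<-) *)
  Printf.printf "%g\n" y.%{3};
  assert (y =~ y)              (* approximate equality, (=~) *)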
\ No newline at end of file diff --git a/owl/Owl_dense_ndarray/Z/index.html b/owl/Owl_dense_ndarray/Z/index.html deleted file mode 100644 index 1c42942cf..000000000 --- a/owl/Owl_dense_ndarray/Z/index.html +++ /dev/null @@ -1,582 +0,0 @@ - -Z (owl.Owl_dense_ndarray.Z)

Module Owl_dense_ndarray.Z

include module type of struct include Owl_dense_ndarray_z end
type elt = Stdlib.Complex.t
type arr = (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
type cast_arr = (float, Stdlib.Bigarray.float64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
include Owl_dense_ndarray_intf.Common with type elt := elt and type arr := arr
include Owl_base_dense_ndarray_intf.Common with type elt := elt with type arr := arr
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:float -> int array -> arr
val shape : arr -> int array
val numel : arr -> int
val strides : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val slice_size : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val flatten : arr -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val squeeze : ?axis:int array -> arr -> arr
val expand : ?hi:bool -> arr -> int -> arr
val split : ?axis:int -> int array -> arr -> arr array
val draw : ?axis:int -> arr -> int -> arr * int array
val pad : ?v:elt -> int list list -> arr -> arr
val one_hot : int -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
Iterate array elements
val iteri : (int -> elt -> unit) -> arr -> unit
val iter : (elt -> unit) -> arr -> unit
val mapi : (int -> elt -> elt) -> arr -> arr
val map : (elt -> elt) -> arr -> arr
val filteri : (int -> elt -> bool) -> arr -> int array
val filter : (elt -> bool) -> arr -> int array
val foldi : ?axis:int -> (int -> elt -> elt -> elt) -> elt -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scani : ?axis:int -> (int -> elt -> elt -> elt) -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
Examination & Comparison
val exists : (elt -> bool) -> arr -> bool
val not_exists : (elt -> bool) -> arr -> bool
val for_all : (elt -> bool) -> arr -> bool
val is_zero : arr -> bool
val is_positive : arr -> bool
val is_negative : arr -> bool
val is_nonpositive : arr -> bool
val is_nonnegative : arr -> bool
val is_normal : arr -> bool
val not_nan : arr -> bool
val not_inf : arr -> bool
val equal : arr -> arr -> bool
val not_equal : arr -> arr -> bool
val greater : arr -> arr -> bool
val less : arr -> arr -> bool
val greater_equal : arr -> arr -> bool
val less_equal : arr -> arr -> bool
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val equal_scalar : arr -> elt -> bool
val not_equal_scalar : arr -> elt -> bool
val less_scalar : arr -> elt -> bool
val greater_scalar : arr -> elt -> bool
val less_equal_scalar : arr -> elt -> bool
val greater_equal_scalar : arr -> elt -> bool
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val diag : ?k:int -> arr -> arr
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
Create N-dimensional array
val linspace : elt -> elt -> int -> arr

linspace a b n returns an array of n values evenly spaced between a and b, e.g. linspace 0. 9. 10.

val logspace : ?base:float -> elt -> elt -> int -> arr

logspace ~base a b n returns n values whose exponents run evenly from a to b, i.e. from base ** a to base ** b, e.g. logspace 0. 9. 10.

val unit_basis : int -> int -> arr

unit_basis n i returns a unit basis vector of length n with the i-th element set to 1.
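Since elt is Stdlib.Complex.t in this module, the constructors above take complex scalars. A small sketch using create, get and set from the listing above (illustrative only; Complex is the OCaml standard library module).

module Z = Owl_dense_ndarray.Z

let () =
  (* 2 x 3 ndarray filled with 1 + 2i *)
  let x = Z.create [|2; 3|] Complex.{ re = 1.; im = 2. } in
  (* read one element back and shift it by i *)
  let a = Z.get x [|0; 1|] in
  Z.set x [|0; 1|] (Complex.add a Complex.i);
  Z.print x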

Obtain basic properties
val num_dims : arr -> int
val nth_dim : arr -> int -> int
val nnz : arr -> int
val density : arr -> float
val size_in_bytes : arr -> int
val same_shape : arr -> arr -> bool
val same_data : arr -> arr -> bool
val ind : arr -> int -> int array
val i1d : arr -> int array -> int
Manipulate an N-dimensional array
val get_index : arr -> int array array -> elt array
val set_index : arr -> int array array -> elt array -> unit
val get_fancy : Owl_types.index list -> arr -> arr
val set_fancy : Owl_types.index list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val sub_ndarray : int array -> arr -> arr array
val slice_left : arr -> int array -> arr
val fill : arr -> elt -> unit
val resize : ?head:bool -> arr -> int array -> arr
val flip : ?axis:int -> arr -> arr
val rotate : arr -> int -> arr
val swap : int -> int -> arr -> arr
val concat_vertical : arr -> arr -> arr
val concat_horizontal : arr -> arr -> arr
val concat_vh : arr array array -> arr
val split_vh : (int * int) array array -> arr -> arr array array
val dropout : ?rate:float -> arr -> arr
val top : arr -> int -> int array array
val bottom : arr -> int -> int array array
val sort : arr -> arr
val sort1 : ?axis:int -> arr -> arr
val argsort : arr -> (int64, Stdlib.Bigarray.int64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
val mmap : Unix.file_descr -> ?pos:int64 -> bool -> int array -> arr
Iterate array elements
val iter2i : (int -> elt -> elt -> unit) -> arr -> arr -> unit
val iter2 : (elt -> elt -> unit) -> arr -> arr -> unit
val map2i : (int -> elt -> elt -> elt) -> arr -> arr -> arr
val map2 : (elt -> elt -> elt) -> arr -> arr -> arr
val iteri_nd : (int array -> elt -> unit) -> arr -> unit
val mapi_nd : (int array -> elt -> elt) -> arr -> arr
val foldi_nd : ?axis:int -> (int array -> elt -> elt -> elt) -> elt -> arr -> arr
val scani_nd : ?axis:int -> (int array -> elt -> elt -> elt) -> arr -> arr
val filteri_nd : (int array -> elt -> bool) -> arr -> int array array
val iter2i_nd : (int array -> elt -> elt -> unit) -> arr -> arr -> unit
val map2i_nd : (int array -> elt -> elt -> elt) -> arr -> arr -> arr
val iteri_slice : ?axis:int -> (int -> arr -> unit) -> arr -> unit
val iter_slice : ?axis:int -> (arr -> unit) -> arr -> unit
val mapi_slice : ?axis:int -> (int -> arr -> 'c) -> arr -> 'c array
val map_slice : ?axis:int -> (arr -> 'c) -> arr -> 'c array
val filteri_slice : ?axis:int -> (int -> arr -> bool) -> arr -> arr array
val filter_slice : ?axis:int -> (arr -> bool) -> arr -> arr array
val foldi_slice : ?axis:int -> (int -> 'c -> arr -> 'c) -> 'c -> arr -> 'c
val fold_slice : ?axis:int -> ('c -> arr -> 'c) -> 'c -> arr -> 'c
Examine array elements or compare two arrays
val approx_equal : ?eps:float -> arr -> arr -> bool
val approx_equal_scalar : ?eps:float -> arr -> elt -> bool
val approx_elt_equal : ?eps:float -> arr -> arr -> arr
val approx_elt_equal_scalar : ?eps:float -> arr -> elt -> arr
Input/Output functions
val to_array : arr -> elt array
val save : out:string -> arr -> unit
val load : string -> arr
val save_npy : out:string -> arr -> unit
val load_npy : string -> arr
Unary mathematical operations
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod' : arr -> elt
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean' : arr -> elt
val median' : arr -> elt
val median : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var' : arr -> elt
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std' : arr -> elt
val sem : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sem' : arr -> elt
val minmax : ?axis:int -> ?keep_dims:bool -> arr -> arr * arr
val minmax' : arr -> elt * elt
val min_i : arr -> elt * int array
val max_i : arr -> elt * int array
val minmax_i : arr -> (elt * int array) * (elt * int array)
val abs2 : arr -> arr
val conj : arr -> arr
val reci : arr -> arr
val reci_tol : ?tol:elt -> arr -> arr
val cbrt : arr -> arr
val exp2 : arr -> arr
val exp10 : arr -> arr
val expm1 : arr -> arr
val log1p : arr -> arr
val trunc : arr -> arr
val fix : arr -> arr
val modf : arr -> arr * arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm' : arr -> elt
val l2norm_sqr : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm_sqr' : arr -> elt
val vecnorm : ?axis:int -> ?p:float -> ?keep_dims:bool -> arr -> arr
val vecnorm' : ?p:float -> arr -> elt
val cumsum : ?axis:int -> arr -> arr
val cumprod : ?axis:int -> arr -> arr
val cummin : ?axis:int -> arr -> arr
val cummax : ?axis:int -> arr -> arr
val diff : ?axis:int -> ?n:int -> arr -> arr
val lgamma : arr -> arr
Binary mathematical operations
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val ssqr' : arr -> elt -> elt
val ssqr_diff' : arr -> arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
Tensor Calculus
val contract1 : (int * int) array -> arr -> arr
val contract2 : (int * int) array -> arr -> arr -> arr
Experimental functions
val sum_slices : ?axis:int -> arr -> arr
val slide : ?axis:int -> ?ofs:int -> ?step:int -> window:int -> arr -> arr
Functions of in-place modification
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:float -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val sort_ : arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_fancy_ : out:arr -> Owl_types.index list -> arr -> unit
val set_fancy_ : out:arr -> Owl_types.index list -> arr -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit

Matrix functions

val col : arr -> int -> arr
val cols : arr -> int array -> arr
val dot : arr -> arr -> arr
val trace : arr -> elt
val to_arrays : arr -> elt array array
val draw_rows : ?replacement:bool -> arr -> int -> arr * int array
val draw_cols : ?replacement:bool -> arr -> int -> arr * int array
val draw_rows2 : ?replacement:bool -> arr -> arr -> int -> arr * arr * int array
val draw_cols2 : ?replacement:bool -> arr -> arr -> int -> arr * arr * int array
include Owl_dense_ndarray_intf.NN with type arr := arr
include Owl_base_dense_ndarray_intf.NN with type arr := arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val max_pool2d_argmax : ?padding:Owl_types.padding -> arr -> int array -> int array -> arr * (int64, Stdlib.Bigarray.int64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
val conv1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
include Owl_dense_ndarray_intf.Complex with type elt := elt and type arr := arr and type cast_arr := cast_arr
Complex operations
val complex : cast_arr -> cast_arr -> arr

complex re im constructs a complex ndarray/matrix x from re and im, which supply the real and imaginary parts of x respectively.

Note that both re and im can themselves be complex but must have the same type; in that case the real part of re becomes the real part of x and the imaginary part of im becomes the imaginary part of x.

val polar : cast_arr -> cast_arr -> arr

polar rho theta constructs a complex ndarray/matrix from the polar coordinates rho and theta. rho contains the magnitudes and theta contains the phase angles. Note that the behaviour is undefined if rho has negative elements or theta has infinite elements.

val re : arr -> cast_arr
val im : arr -> cast_arr
val sum' : arr -> elt
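The complex / polar constructors and the re / im projections above fit together as in the sketch below, which assumes cast_arr is the double-precision real ndarray type of Owl_dense_ndarray.D (as its definition above suggests).

module D = Owl_dense_ndarray.D
module Z = Owl_dense_ndarray.Z

let () =
  let rho   = D.create [|3|] 2. in       (* magnitudes *)
  let theta = D.linspace 0. 1.5 3 in     (* phase angles, in radians *)
  let z = Z.polar rho theta in           (* each element is 2 * exp (i * theta) *)
  (* round-trip through the real and imaginary parts *)
  let z' = Z.complex (Z.re z) (Z.im z) in
  assert (Z.approx_equal z z')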
include module type of struct include Operator end
include sig ... end
val (+$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (-$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (*$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (/$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($+) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($-) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($*) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val ($/) : 'a -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val (=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (!=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (<>) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (>) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (<) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (>=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (<=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
include sig ... end
val (=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (!=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (<>$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (<$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (>$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (<=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (>=$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (!=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (<>.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (<.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (>.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (<=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (>=.$) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (=~) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> bool
val (=~$) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> bool
val (=~.) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t
val (=~.$) : ?eps:float -> ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> ('a, 'b) Owl_dense_ndarray_generic.t
val (%) : (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t
val (%$) : (float, 'a) Owl_dense_ndarray_generic.t -> float -> (float, 'a) Owl_dense_ndarray_generic.t
val (**) : (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t
val ($**) : float -> (float, 'a) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t
val (**$) : (float, 'a) Owl_dense_ndarray_generic.t -> float -> (float, 'a) Owl_dense_ndarray_generic.t
val (+=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (-=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (*=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (/=) : ('a, 'b) Owl_dense_ndarray_generic.t -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (+$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (-$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (*$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (/$=) : ('a, 'b) Owl_dense_ndarray_generic.t -> 'a -> unit
val (.!{;..}) : ('a, 'b) Owl_dense_ndarray_generic.t -> Owl_types.index array -> ('a, 'b) Owl_dense_ndarray_generic.t
val (.!{;..}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> Owl_types.index array -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (.${}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list -> ('a, 'b) Owl_dense_ndarray_generic.t
val (.${;..}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list array -> ('a, 'b) Owl_dense_ndarray_generic.t
val (.${}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
val (.${;..}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int list array -> ('a, 'b) Owl_dense_ndarray_generic.t -> unit
include sig ... end
val (.%{}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int -> 'a
val (.%{;..}) : ('a, 'b) Owl_dense_ndarray_generic.t -> int array -> 'a
val (.%{}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int -> 'a -> unit
val (.%{;..}<-) : ('a, 'b) Owl_dense_ndarray_generic.t -> int array -> 'a -> unit
val mpow : Owl_linalg_z.mat -> float -> Owl_linalg_z.mat
\ No newline at end of file diff --git a/owl/Owl_dense_ndarray_a/.dummy b/owl/Owl_dense_ndarray_a/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_dense_ndarray_c/.dummy b/owl/Owl_dense_ndarray_c/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_dense_ndarray_d/.dummy b/owl/Owl_dense_ndarray_d/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_dense_ndarray_generic/.dummy b/owl/Owl_dense_ndarray_generic/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_dense_ndarray_intf/.dummy b/owl/Owl_dense_ndarray_intf/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_dense_ndarray_intf/module-type-Common/index.html b/owl/Owl_dense_ndarray_intf/module-type-Common/index.html deleted file mode 100644 index 15cd0c99a..000000000 --- a/owl/Owl_dense_ndarray_intf/module-type-Common/index.html +++ /dev/null @@ -1,34 +0,0 @@ - -Common (owl.Owl_dense_ndarray_intf.Common)

Module type Owl_dense_ndarray_intf.Common

include Owl_base_dense_ndarray_intf.Common
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:float -> int array -> arr
val shape : arr -> int array
val numel : arr -> int
val strides : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val slice_size : arr -> int array

Refer to :doc:`owl_dense_ndarray_generic`

val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val flatten : arr -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val squeeze : ?axis:int array -> arr -> arr
val expand : ?hi:bool -> arr -> int -> arr
val split : ?axis:int -> int array -> arr -> arr array
val draw : ?axis:int -> arr -> int -> arr * int array
val pad : ?v:elt -> int list list -> arr -> arr
val one_hot : int -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
Iterate array elements
val iteri : (int -> elt -> unit) -> arr -> unit
val iter : (elt -> unit) -> arr -> unit
val mapi : (int -> elt -> elt) -> arr -> arr
val map : (elt -> elt) -> arr -> arr
val filteri : (int -> elt -> bool) -> arr -> int array
val filter : (elt -> bool) -> arr -> int array
val foldi : ?axis:int -> (int -> elt -> elt -> elt) -> elt -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scani : ?axis:int -> (int -> elt -> elt -> elt) -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
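For reference, here is a small sketch of the iteration functions listed above. The interface itself is abstract over arr and elt; the use of Owl.Arr as a concrete instantiation is an assumption.

(* Hedged sketch: map / iter / fold over a dense ndarray. *)
open Owl

let () =
  let x = Arr.sequential [| 2; 3 |] in            (* 0. 1. 2. / 3. 4. 5. *)
  let y = Arr.map (fun a -> a *. a) x in          (* element-wise square *)
  Arr.iteri (fun i a -> Printf.printf "%i:%g " i a) y;
  print_newline ();
  (* fold along axis 0 with (+.), starting from 0. *)
  let s = Arr.fold ~axis:0 ( +. ) 0. y in
  Arr.print s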
Examination & Comparison
val exists : (elt -> bool) -> arr -> bool
val not_exists : (elt -> bool) -> arr -> bool
val for_all : (elt -> bool) -> arr -> bool
val is_zero : arr -> bool
val is_positive : arr -> bool
val is_negative : arr -> bool
val is_nonpositive : arr -> bool
val is_nonnegative : arr -> bool
val is_normal : arr -> bool
val not_nan : arr -> bool
val not_inf : arr -> bool
val equal : arr -> arr -> bool
val not_equal : arr -> arr -> bool
val greater : arr -> arr -> bool
val less : arr -> arr -> bool
val greater_equal : arr -> arr -> bool
val less_equal : arr -> arr -> bool
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val equal_scalar : arr -> elt -> bool
val not_equal_scalar : arr -> elt -> bool
val less_scalar : arr -> elt -> bool
val greater_scalar : arr -> elt -> bool
val less_equal_scalar : arr -> elt -> bool
val greater_equal_scalar : arr -> elt -> bool
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val diag : ?k:int -> arr -> arr
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
Create N-dimensional array
val linspace : elt -> elt -> int -> arr

linspace a b n generates an array of n values evenly spaced between a and b; for example, linspace 0. 9. 10 gives 0., 1., ..., 9.

val logspace : ?base:float -> elt -> elt -> int -> arr

logspace ?base a b n generates an array of n values evenly spaced on a logarithmic scale, running from base ** a to base ** b; for example, logspace ~base:10. 0. 3. 4 gives 1., 10., 100., 1000.

val unit_basis : int -> int -> arr

unit_basis n i returns a unit basis vector of length n with the i-th element set to 1.
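A short usage sketch of the three generators documented above; Owl.Arr is an assumed instantiation of this interface.

(* Hedged sketch of linspace / logspace / unit_basis. *)
open Owl

let () =
  let a = Arr.linspace 0. 9. 10 in             (* 0., 1., ..., 9. *)
  let b = Arr.logspace ~base:10. 0. 3. 4 in    (* 1., 10., 100., 1000. *)
  let e = Arr.unit_basis 5 2 in                (* 0., 0., 1., 0., 0. *)
  Arr.print a; Arr.print b; Arr.print e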

Obtain basic properties
val num_dims : arr -> int
val nth_dim : arr -> int -> int
val nnz : arr -> int
val density : arr -> float
val size_in_bytes : arr -> int
val same_shape : arr -> arr -> bool
val same_data : arr -> arr -> bool
val ind : arr -> int -> int array
val i1d : arr -> int array -> int
Manipulate an N-dimensional array
val get_index : arr -> int array array -> elt array
val set_index : arr -> int array array -> elt array -> unit
val get_fancy : Owl_types.index list -> arr -> arr
val set_fancy : Owl_types.index list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val sub_ndarray : int array -> arr -> arr array
val slice_left : arr -> int array -> arr
val fill : arr -> elt -> unit
val resize : ?head:bool -> arr -> int array -> arr
val flip : ?axis:int -> arr -> arr
val rotate : arr -> int -> arr
val swap : int -> int -> arr -> arr
val concat_vertical : arr -> arr -> arr
val concat_horizontal : arr -> arr -> arr
val concat_vh : arr array array -> arr
val split_vh : (int * int) array array -> arr -> arr array array
val dropout : ?rate:float -> arr -> arr
val top : arr -> int -> int array array
val bottom : arr -> int -> int array array
val sort : arr -> arr
val sort1 : ?axis:int -> arr -> arr
val argsort : arr -> (int64, Stdlib.Bigarray.int64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
val mmap : Unix.file_descr -> ?pos:int64 -> bool -> int array -> arr
Iterate array elements
val iter2i : (int -> elt -> elt -> unit) -> arr -> arr -> unit
val iter2 : (elt -> elt -> unit) -> arr -> arr -> unit
val map2i : (int -> elt -> elt -> elt) -> arr -> arr -> arr
val map2 : (elt -> elt -> elt) -> arr -> arr -> arr
val iteri_nd : (int array -> elt -> unit) -> arr -> unit
val mapi_nd : (int array -> elt -> elt) -> arr -> arr
val foldi_nd : ?axis:int -> (int array -> elt -> elt -> elt) -> elt -> arr -> arr
val scani_nd : ?axis:int -> (int array -> elt -> elt -> elt) -> arr -> arr
val filteri_nd : (int array -> elt -> bool) -> arr -> int array array
val iter2i_nd : (int array -> elt -> elt -> unit) -> arr -> arr -> unit
val map2i_nd : (int array -> elt -> elt -> elt) -> arr -> arr -> arr
val iteri_slice : ?axis:int -> (int -> arr -> unit) -> arr -> unit
val iter_slice : ?axis:int -> (arr -> unit) -> arr -> unit
val mapi_slice : ?axis:int -> (int -> arr -> 'c) -> arr -> 'c array
val map_slice : ?axis:int -> (arr -> 'c) -> arr -> 'c array
val filteri_slice : ?axis:int -> (int -> arr -> bool) -> arr -> arr array
val filter_slice : ?axis:int -> (arr -> bool) -> arr -> arr array
val foldi_slice : ?axis:int -> (int -> 'c -> arr -> 'c) -> 'c -> arr -> 'c
val fold_slice : ?axis:int -> ('c -> arr -> 'c) -> 'c -> arr -> 'c
Examine array elements or compare two arrays
val approx_equal : ?eps:float -> arr -> arr -> bool
val approx_equal_scalar : ?eps:float -> arr -> elt -> bool
val approx_elt_equal : ?eps:float -> arr -> arr -> arr
val approx_elt_equal_scalar : ?eps:float -> arr -> elt -> arr
Input/Output functions
val to_array : arr -> elt array
val save : out:string -> arr -> unit
val load : string -> arr
val save_npy : out:string -> arr -> unit
val load_npy : string -> arr
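The I/O functions above round-trip an ndarray through Owl's own format or a NumPy-compatible .npy file. A minimal sketch follows; the file names are placeholders and Owl.Arr is an assumed instantiation of this interface.

(* Hedged sketch: saving and reloading an ndarray. *)
open Owl

let () =
  let x = Arr.gaussian [| 4; 4 |] in
  Arr.save ~out:"x.owl" x;             (* Owl's own serialised format *)
  let y = Arr.load "x.owl" in
  Arr.save_npy ~out:"x.npy" x;         (* NumPy-compatible .npy file *)
  let z = Arr.load_npy "x.npy" in
  assert (Arr.shape y = Arr.shape z)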
Unary mathematical operations
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod' : arr -> elt
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean' : arr -> elt
val median' : arr -> elt
val median : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var' : arr -> elt
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std' : arr -> elt
val sem : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sem' : arr -> elt
val minmax : ?axis:int -> ?keep_dims:bool -> arr -> arr * arr
val minmax' : arr -> elt * elt
val min_i : arr -> elt * int array
val max_i : arr -> elt * int array
val minmax_i : arr -> (elt * int array) * (elt * int array)
val abs2 : arr -> arr
val conj : arr -> arr
val reci : arr -> arr
val reci_tol : ?tol:elt -> arr -> arr
val cbrt : arr -> arr
val exp2 : arr -> arr
val exp10 : arr -> arr
val expm1 : arr -> arr
val log1p : arr -> arr
val trunc : arr -> arr
val fix : arr -> arr
val modf : arr -> arr * arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm' : arr -> elt
val l2norm_sqr : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm_sqr' : arr -> elt
val vecnorm : ?axis:int -> ?p:float -> ?keep_dims:bool -> arr -> arr
val vecnorm' : ?p:float -> arr -> elt
val cumsum : ?axis:int -> arr -> arr
val cumprod : ?axis:int -> arr -> arr
val cummin : ?axis:int -> arr -> arr
val cummax : ?axis:int -> arr -> arr
val diff : ?axis:int -> ?n:int -> arr -> arr
val lgamma : arr -> arr
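The reductions above follow the usual axis / keep_dims convention. A brief sketch, again assuming Owl.Arr as the concrete module:

(* Hedged sketch: axis-wise and whole-array reductions. *)
open Owl

let () =
  let x = Arr.uniform [| 3; 4 |] in
  let m  = Arr.mean ~axis:0 ~keep_dims:true x in   (* shape [|1;4|] *)
  let s  = Arr.std ~axis:1 x in                    (* per-row standard deviation *)
  let n2 = Arr.l2norm' x in                        (* scalar L2 norm of the whole array *)
  Arr.print m; Arr.print s;
  Printf.printf "l2norm' = %g\n" n2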
Binary mathematical operations
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val ssqr' : arr -> elt -> elt
val ssqr_diff' : arr -> arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
Tensor Calculus
val contract1 : (int * int) array -> arr -> arr
val contract2 : (int * int) array -> arr -> arr -> arr
Experimental functions
val sum_slices : ?axis:int -> arr -> arr
val slide : ?axis:int -> ?ofs:int -> ?step:int -> window:int -> arr -> arr
Functions of in-place modification
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:float -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val sort_ : arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_fancy_ : out:arr -> Owl_types.index list -> arr -> unit
val set_fancy_ : out:arr -> Owl_types.index list -> arr -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
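The trailing-underscore functions above write their result into an existing array instead of allocating a new one. The sketch below illustrates the ?out convention; Owl.Arr is assumed, and the behaviour when out is omitted (typically overwriting the first array argument) is stated as an assumption rather than a guarantee of this page.

(* Hedged sketch: in-place arithmetic with ?out. *)
open Owl

let () =
  let a = Arr.ones [| 2; 2 |] in
  let b = Arr.create [| 2; 2 |] 3. in
  let c = Arr.empty [| 2; 2 |] in
  Arr.add_ ~out:c a b;        (* c <- a + b *)
  Arr.mul_scalar_ a 10.;      (* result written back into a *)
  Arr.sigmoid_ ~out:c c;      (* element-wise sigmoid into c *)
  Arr.print a; Arr.print c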

Matrix functions

val col : arr -> int -> arr
val cols : arr -> int array -> arr
val dot : arr -> arr -> arr
val trace : arr -> elt
val to_arrays : arr -> elt array array
val draw_rows : ?replacement:bool -> arr -> int -> arr * int array
val draw_cols : ?replacement:bool -> arr -> int -> arr * int array
val draw_rows2 : ?replacement:bool -> arr -> arr -> int -> arr * arr * int array
val draw_cols2 : ?replacement:bool -> arr -> arr -> int -> arr * arr * int array
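The matrix helpers above expect 2-d arrays. A short sketch using Owl's matrix module; the choice of Owl.Mat here is an assumption, since the interface itself is abstract.

(* Hedged sketch: matrix product, trace and row sampling. *)
open Owl

let () =
  let a = Mat.uniform 3 4 in
  let b = Mat.uniform 4 2 in
  let c = Mat.dot a b in                      (* 3 x 2 matrix product *)
  let t = Mat.trace (Mat.eye 3) in            (* = 3. *)
  let rows, idx = Mat.draw_rows ~replacement:false a 2 in
  Printf.printf "trace = %g, drew %d rows\n" t (Array.length idx);
  Mat.print c; Mat.print rows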
\ No newline at end of file diff --git a/owl/Owl_dense_ndarray_intf/module-type-Complex/index.html b/owl/Owl_dense_ndarray_intf/module-type-Complex/index.html deleted file mode 100644 index 166b6bc0e..000000000 --- a/owl/Owl_dense_ndarray_intf/module-type-Complex/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Complex (owl.Owl_dense_ndarray_intf.Complex)

Module type Owl_dense_ndarray_intf.Complex

type elt
type arr
type cast_arr
Complex operations
val complex : cast_arr -> cast_arr -> arr

complex re im constructs a complex ndarray/matrix x from re and im, which supply the real and imaginary parts of x respectively.

Note that re and im may themselves be complex, but they must have the same type; in that case the real part of re becomes the real part of x and the imaginary part of im becomes the imaginary part of x.

val polar : cast_arr -> cast_arr -> arr

polar rho theta constructs a complex ndarray/matrix from polar coordinates rho and theta. rho contains the magnitudes and theta contains the phase angles. Note that the behaviour is undefined if rho has negative elements or theta has non-finite elements.

val re : arr -> cast_arr
val im : arr -> cast_arr
val sum' : arr -> elt
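A sketch of building a complex ndarray from its Cartesian and polar parts. Using Dense.Ndarray.Z (double-precision complex) with Dense.Ndarray.D as the cast type is an assumed instantiation of this interface.

(* Hedged sketch: complex construction and decomposition. *)
open Owl

module D = Dense.Ndarray.D   (* real-valued parts *)
module Z = Dense.Ndarray.Z   (* complex ndarray *)

let () =
  let re = D.sequential [| 4 |] in
  let im = D.ones [| 4 |] in
  let x = Z.complex re im in          (* element k of x is re_k + i * im_k *)
  let rho   = D.create [| 4 |] 2. in
  let theta = D.linspace 0. 1.5 4 in
  let y = Z.polar rho theta in        (* magnitude / phase construction *)
  D.print (Z.re x); D.print (Z.im y)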
\ No newline at end of file diff --git a/owl/Owl_dense_ndarray_intf/module-type-Distribution/index.html b/owl/Owl_dense_ndarray_intf/module-type-Distribution/index.html deleted file mode 100644 index 026ee16fe..000000000 --- a/owl/Owl_dense_ndarray_intf/module-type-Distribution/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Distribution (owl.Owl_dense_ndarray_intf.Distribution)

Module type Owl_dense_ndarray_intf.Distribution

type arr
Stats & distribution functions
val uniform_rvs : a:arr -> b:arr -> n:int -> arr
val uniform_pdf : a:arr -> b:arr -> arr -> arr
val uniform_logpdf : a:arr -> b:arr -> arr -> arr
val uniform_cdf : a:arr -> b:arr -> arr -> arr
val uniform_logcdf : a:arr -> b:arr -> arr -> arr
val uniform_ppf : a:arr -> b:arr -> arr -> arr
val uniform_sf : a:arr -> b:arr -> arr -> arr
val uniform_logsf : a:arr -> b:arr -> arr -> arr
val uniform_isf : a:arr -> b:arr -> arr -> arr
val gaussian_rvs : mu:arr -> sigma:arr -> n:int -> arr
val gaussian_pdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_logpdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_cdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_logcdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_ppf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_sf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_logsf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_isf : mu:arr -> sigma:arr -> arr -> arr
val exponential_rvs : lambda:arr -> n:int -> arr
val exponential_pdf : lambda:arr -> arr -> arr
val exponential_logpdf : lambda:arr -> arr -> arr
val exponential_cdf : lambda:arr -> arr -> arr
val exponential_logcdf : lambda:arr -> arr -> arr
val exponential_ppf : lambda:arr -> arr -> arr
val exponential_sf : lambda:arr -> arr -> arr
val exponential_logsf : lambda:arr -> arr -> arr
val exponential_isf : lambda:arr -> arr -> arr
val gamma_rvs : shape:arr -> scale:arr -> n:int -> arr
val gamma_pdf : shape:arr -> scale:arr -> arr -> arr
val gamma_logpdf : shape:arr -> scale:arr -> arr -> arr
val gamma_cdf : shape:arr -> scale:arr -> arr -> arr
val gamma_logcdf : shape:arr -> scale:arr -> arr -> arr
val gamma_ppf : shape:arr -> scale:arr -> arr -> arr
val gamma_sf : shape:arr -> scale:arr -> arr -> arr
val gamma_logsf : shape:arr -> scale:arr -> arr -> arr
val gamma_isf : shape:arr -> scale:arr -> arr -> arr
val beta_rvs : a:arr -> b:arr -> n:int -> arr
val beta_pdf : a:arr -> b:arr -> arr -> arr
val beta_logpdf : a:arr -> b:arr -> arr -> arr
val beta_cdf : a:arr -> b:arr -> arr -> arr
val beta_logcdf : a:arr -> b:arr -> arr -> arr
val beta_ppf : a:arr -> b:arr -> arr -> arr
val beta_sf : a:arr -> b:arr -> arr -> arr
val beta_logsf : a:arr -> b:arr -> arr -> arr
val beta_isf : a:arr -> b:arr -> arr -> arr
val chi2_rvs : df:arr -> n:int -> arr
val chi2_pdf : df:arr -> arr -> arr
val chi2_logpdf : df:arr -> arr -> arr
val chi2_cdf : df:arr -> arr -> arr
val chi2_logcdf : df:arr -> arr -> arr
val chi2_ppf : df:arr -> arr -> arr
val chi2_sf : df:arr -> arr -> arr
val chi2_logsf : df:arr -> arr -> arr
val chi2_isf : df:arr -> arr -> arr
val f_rvs : dfnum:arr -> dfden:arr -> n:int -> arr
val f_pdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_logpdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_cdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_logcdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_ppf : dfnum:arr -> dfden:arr -> arr -> arr
val f_sf : dfnum:arr -> dfden:arr -> arr -> arr
val f_logsf : dfnum:arr -> dfden:arr -> arr -> arr
val f_isf : dfnum:arr -> dfden:arr -> arr -> arr
val cauchy_rvs : loc:arr -> scale:arr -> n:int -> arr
val cauchy_pdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_logpdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_cdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_logcdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_ppf : loc:arr -> scale:arr -> arr -> arr
val cauchy_sf : loc:arr -> scale:arr -> arr -> arr
val cauchy_logsf : loc:arr -> scale:arr -> arr -> arr
val cauchy_isf : loc:arr -> scale:arr -> arr -> arr
val lomax_rvs : shape:arr -> scale:arr -> n:int -> arr
val lomax_pdf : shape:arr -> scale:arr -> arr -> arr
val lomax_logpdf : shape:arr -> scale:arr -> arr -> arr
val lomax_cdf : shape:arr -> scale:arr -> arr -> arr
val lomax_logcdf : shape:arr -> scale:arr -> arr -> arr
val lomax_ppf : shape:arr -> scale:arr -> arr -> arr
val lomax_sf : shape:arr -> scale:arr -> arr -> arr
val lomax_logsf : shape:arr -> scale:arr -> arr -> arr
val lomax_isf : shape:arr -> scale:arr -> arr -> arr
val weibull_rvs : shape:arr -> scale:arr -> n:int -> arr
val weibull_pdf : shape:arr -> scale:arr -> arr -> arr
val weibull_logpdf : shape:arr -> scale:arr -> arr -> arr
val weibull_cdf : shape:arr -> scale:arr -> arr -> arr
val weibull_logcdf : shape:arr -> scale:arr -> arr -> arr
val weibull_ppf : shape:arr -> scale:arr -> arr -> arr
val weibull_sf : shape:arr -> scale:arr -> arr -> arr
val weibull_logsf : shape:arr -> scale:arr -> arr -> arr
val weibull_isf : shape:arr -> scale:arr -> arr -> arr
val laplace_rvs : loc:arr -> scale:arr -> n:int -> arr
val laplace_pdf : loc:arr -> scale:arr -> arr -> arr
val laplace_logpdf : loc:arr -> scale:arr -> arr -> arr
val laplace_cdf : loc:arr -> scale:arr -> arr -> arr
val laplace_logcdf : loc:arr -> scale:arr -> arr -> arr
val laplace_ppf : loc:arr -> scale:arr -> arr -> arr
val laplace_sf : loc:arr -> scale:arr -> arr -> arr
val laplace_logsf : loc:arr -> scale:arr -> arr -> arr
val laplace_isf : loc:arr -> scale:arr -> arr -> arr
val gumbel1_rvs : a:arr -> b:arr -> n:int -> arr
val gumbel1_pdf : a:arr -> b:arr -> arr -> arr
val gumbel1_logpdf : a:arr -> b:arr -> arr -> arr
val gumbel1_cdf : a:arr -> b:arr -> arr -> arr
val gumbel1_logcdf : a:arr -> b:arr -> arr -> arr
val gumbel1_ppf : a:arr -> b:arr -> arr -> arr
val gumbel1_sf : a:arr -> b:arr -> arr -> arr
val gumbel1_logsf : a:arr -> b:arr -> arr -> arr
val gumbel1_isf : a:arr -> b:arr -> arr -> arr
val gumbel2_rvs : a:arr -> b:arr -> n:int -> arr
val gumbel2_pdf : a:arr -> b:arr -> arr -> arr
val gumbel2_logpdf : a:arr -> b:arr -> arr -> arr
val gumbel2_cdf : a:arr -> b:arr -> arr -> arr
val gumbel2_logcdf : a:arr -> b:arr -> arr -> arr
val gumbel2_ppf : a:arr -> b:arr -> arr -> arr
val gumbel2_sf : a:arr -> b:arr -> arr -> arr
val gumbel2_logsf : a:arr -> b:arr -> arr -> arr
val gumbel2_isf : a:arr -> b:arr -> arr -> arr
val logistic_rvs : loc:arr -> scale:arr -> n:int -> arr
val logistic_pdf : loc:arr -> scale:arr -> arr -> arr
val logistic_logpdf : loc:arr -> scale:arr -> arr -> arr
val logistic_cdf : loc:arr -> scale:arr -> arr -> arr
val logistic_logcdf : loc:arr -> scale:arr -> arr -> arr
val logistic_ppf : loc:arr -> scale:arr -> arr -> arr
val logistic_sf : loc:arr -> scale:arr -> arr -> arr
val logistic_logsf : loc:arr -> scale:arr -> arr -> arr
val logistic_isf : loc:arr -> scale:arr -> arr -> arr
val lognormal_rvs : mu:arr -> sigma:arr -> n:int -> arr
val lognormal_pdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_logpdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_cdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_logcdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_ppf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_sf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_logsf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_isf : mu:arr -> sigma:arr -> arr -> arr
val rayleigh_rvs : sigma:arr -> n:int -> arr
val rayleigh_pdf : sigma:arr -> arr -> arr
val rayleigh_logpdf : sigma:arr -> arr -> arr
val rayleigh_cdf : sigma:arr -> arr -> arr
val rayleigh_logcdf : sigma:arr -> arr -> arr
val rayleigh_ppf : sigma:arr -> arr -> arr
val rayleigh_sf : sigma:arr -> arr -> arr
val rayleigh_logsf : sigma:arr -> arr -> arr
val rayleigh_isf : sigma:arr -> arr -> arr
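These functions take the distribution parameters as arrays and evaluate element-wise. A sketch of the pdf/cdf convention, assuming Owl.Arr exposes this Distribution interface:

(* Hedged sketch: vectorised Gaussian pdf / cdf with per-element parameters. *)
open Owl

let () =
  let mu    = Arr.zeros [| 5 |] in
  let sigma = Arr.create [| 5 |] 1. in
  let x     = Arr.linspace (-2.) 2. 5 in
  let p = Arr.gaussian_pdf ~mu ~sigma x in   (* N(0,1) density at each x *)
  let c = Arr.gaussian_cdf ~mu ~sigma x in   (* corresponding CDF values *)
  Arr.print p; Arr.print c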
\ No newline at end of file diff --git a/owl/Owl_dense_ndarray_intf/module-type-NN/index.html b/owl/Owl_dense_ndarray_intf/module-type-NN/index.html deleted file mode 100644 index 9a09e5580..000000000 --- a/owl/Owl_dense_ndarray_intf/module-type-NN/index.html +++ /dev/null @@ -1,372 +0,0 @@ - -NN (owl.Owl_dense_ndarray_intf.NN)

Module type Owl_dense_ndarray_intf.NN

include Owl_base_dense_ndarray_intf.NN
type arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val max_pool2d_argmax : ?padding:Owl_types.padding -> arr -> int array -> int array -> arr * (int64, Stdlib.Bigarray.int64_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Genarray.t
val conv1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
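A sketch of the convolution and pooling calling convention. The batch-first layout in the comments and the use of Owl.Arr are assumptions; the kernel and stride arguments follow the int array parameters listed above.

(* Hedged sketch: 2-d convolution followed by max pooling.
   Assumed layout: input [|batch; rows; cols; in_channels|],
   kernel [|k_rows; k_cols; in_channels; out_channels|]. *)
open Owl

let () =
  let input  = Arr.uniform [| 1; 28; 28; 3 |] in
  let kernel = Arr.uniform [| 3; 3; 3; 8 |] in
  let conv   = Arr.conv2d ~padding:Owl_types.SAME input kernel [| 1; 1 |] in
  let pooled = Arr.max_pool2d ~padding:Owl_types.VALID conv [| 2; 2 |] [| 2; 2 |] in
  Array.iter (Printf.printf "%d ") (Arr.shape pooled);
  print_newline ()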
\ No newline at end of file diff --git a/owl/Owl_dense_ndarray_intf/module-type-Real/index.html b/owl/Owl_dense_ndarray_intf/module-type-Real/index.html deleted file mode 100644 index 404d49962..000000000 --- a/owl/Owl_dense_ndarray_intf/module-type-Real/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Real (owl.Owl_dense_ndarray_intf.Real)

Module type Owl_dense_ndarray_intf.Real

include Owl_base_dense_ndarray_intf.Real
type elt
type arr
val sum_slices : ?axis:int -> arr -> arr
val signum : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val atan2 : arr -> arr -> arr
val approx_equal : ?eps:float -> arr -> arr -> bool
val approx_equal_scalar : ?eps:float -> arr -> float -> bool
val approx_elt_equal : ?eps:float -> arr -> arr -> arr
val approx_elt_equal_scalar : ?eps:float -> arr -> float -> arr
val dot : arr -> arr -> arr
val trace : arr -> elt
Helper functions
val float_to_elt : float -> elt
val elt_to_float : elt -> float
Real operations
val i0 : arr -> arr
val i0e : arr -> arr
val i1 : arr -> arr
val i1e : arr -> arr
val iv : v:arr -> arr -> arr
val scalar_iv : v:elt -> arr -> arr
val iv_scalar : v:arr -> elt -> arr
val j0 : arr -> arr
val j1 : arr -> arr
val jv : v:arr -> arr -> arr
val scalar_jv : v:elt -> arr -> arr
val jv_scalar : v:arr -> elt -> arr
val erf : arr -> arr
val erfc : arr -> arr
val logistic : arr -> arr
val elu : ?alpha:elt -> arr -> arr
val leaky_relu : ?alpha:elt -> arr -> arr
val softplus : arr -> arr
val softsign : arr -> arr
val softmax : ?axis:int -> arr -> arr
val sigmoid : arr -> arr
val log_sum_exp' : arr -> float
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val fmod_scalar : arr -> elt -> arr
val scalar_fmod : elt -> arr -> arr
val cross_entropy' : arr -> arr -> float
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
val poisson : mu:elt -> int array -> arr
val poisson_ : mu:elt -> out:arr -> unit
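A brief sketch of a few of the real-only activations and special functions listed above, assuming Owl.Arr as the concrete module:

(* Hedged sketch: activations and special functions on a real ndarray. *)
open Owl

let () =
  let x = Arr.linspace (-3.) 3. 7 in
  let y_relu = Arr.relu x in
  let y_sig  = Arr.sigmoid x in
  let y_soft = Arr.softmax ~axis:0 x in   (* normalises to sum 1 along axis 0 *)
  let y_erf  = Arr.erf x in
  Arr.print y_relu; Arr.print y_sig; Arr.print y_soft; Arr.print y_erf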
\ No newline at end of file diff --git a/owl/Owl_dense_ndarray_s/.dummy b/owl/Owl_dense_ndarray_s/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_dense_ndarray_z/.dummy b/owl/Owl_dense_ndarray_z/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_distribution/.dummy b/owl/Owl_distribution/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_distribution/Make/Beta/index.html b/owl/Owl_distribution/Make/Beta/index.html deleted file mode 100644 index f8cc2b86d..000000000 --- a/owl/Owl_distribution/Make/Beta/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Beta (owl.Owl_distribution.Make.Beta)

Module Make.Beta

type t = {
  1. a : A.arr;
  2. b : A.arr;
}

Type definition of a specific distribution

val make : a:A.arr -> b:A.arr -> t

Make a distribution of the given parameters.

val sample : t -> int -> A.arr

Sample a distribution of the given parameters.

val pdf : t -> A.arr -> A.arr

Probability density/mass function of the distribution.

val logpdf : t -> A.arr -> A.arr

Logarithm of the probability density/mass function of the distribution.

val cdf : t -> A.arr -> A.arr

Cumulative density/mass function of the distribution.

val logcdf : t -> A.arr -> A.arr

Logarithm of the cumulative density/mass function of the distribution.

val ppf : t -> A.arr -> A.arr

Percentile function of the distribution.

val sf : t -> A.arr -> A.arr

Survival function of the distribution.

val logsf : t -> A.arr -> A.arr

Logarithm of the survival function of the distribution.

val isf : t -> A.arr -> A.arr

Inverse survival function of the distribution.

\ No newline at end of file diff --git a/owl/Owl_distribution/Make/Cauchy/index.html b/owl/Owl_distribution/Make/Cauchy/index.html deleted file mode 100644 index 9bc6aa4ba..000000000 --- a/owl/Owl_distribution/Make/Cauchy/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Cauchy (owl.Owl_distribution.Make.Cauchy)

Module Make.Cauchy

type t = {
  1. loc : A.arr;
  2. scale : A.arr;
}

Type definition of a specific distribution

val make : loc:A.arr -> scale:A.arr -> t

Make a distribution of the given parameters.

val sample : t -> int -> A.arr

Sample a distribution of the given parameters.

val pdf : t -> A.arr -> A.arr

Probability density/mass function of the distribution.

val logpdf : t -> A.arr -> A.arr

Logarithm of the probability density/mass function of the distribution.

val cdf : t -> A.arr -> A.arr

Cumulative density/mass function of the distribution.

val logcdf : t -> A.arr -> A.arr

Logarithm of the cumulative density/mass function of the distribution.

val ppf : t -> A.arr -> A.arr

Percentile function of the distribution.

val sf : t -> A.arr -> A.arr

Survival function of the distribution.

val logsf : t -> A.arr -> A.arr

Logarithm of the survival function of the distribution.

val isf : t -> A.arr -> A.arr

Inverse survival function of the distribution.

\ No newline at end of file diff --git a/owl/Owl_distribution/Make/Chi2/index.html b/owl/Owl_distribution/Make/Chi2/index.html deleted file mode 100644 index a21afcf1c..000000000 --- a/owl/Owl_distribution/Make/Chi2/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Chi2 (owl.Owl_distribution.Make.Chi2)

Module Make.Chi2

type t = {
  1. df : A.arr;
}

Type definition of a specific distribution

val make : df:A.arr -> _sigma:'a -> t

Make a distribution of the given parameters.

val sample : t -> int -> A.arr

Sample a distribution of the given parameters.

val pdf : t -> A.arr -> A.arr

Probability density/mass function of the distribution.

val logpdf : t -> A.arr -> A.arr

Logarithm of the probability density/mass function of the distribution.

val cdf : t -> A.arr -> A.arr

Cumulative density/mass function of the distribution.

val logcdf : t -> A.arr -> A.arr

Logarithm of the cumulative density/mass function of the distribution.

val ppf : t -> A.arr -> A.arr

Percentile function of the distribution.

val sf : t -> A.arr -> A.arr

Survival function of the distribution.

val logsf : t -> A.arr -> A.arr

Logarithm of the survival function of the distribution.

val isf : t -> A.arr -> A.arr

Inverse survival function of the distribution.

\ No newline at end of file diff --git a/owl/Owl_distribution/Make/Exponential/index.html b/owl/Owl_distribution/Make/Exponential/index.html deleted file mode 100644 index e523a22eb..000000000 --- a/owl/Owl_distribution/Make/Exponential/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Exponential (owl.Owl_distribution.Make.Exponential)

Module Make.Exponential

type t = {
  1. lambda : A.arr;
}

Type definition of a specific distribution

val make : lambda:A.arr -> t

Make a distribution of the given parameters.

val sample : t -> int -> A.arr

Sample a distribution of the given parameters.

val pdf : t -> A.arr -> A.arr

Probability density/mass function of the distribution.

val logpdf : t -> A.arr -> A.arr

Logarithm of the probability density/mass function of the distribution.

val cdf : t -> A.arr -> A.arr

Cumulative density/mass function of the distribution.

val logcdf : t -> A.arr -> A.arr

Logarithm of the cumulative density/mass function of the distribution.

val ppf : t -> A.arr -> A.arr

Percentile function of the distribution.

val sf : t -> A.arr -> A.arr

Survival function of the distribution.

val logsf : t -> A.arr -> A.arr

Logarithm of the survival function of the distribution.

val isf : t -> A.arr -> A.arr

Inverse survival function of the distribution.

\ No newline at end of file diff --git a/owl/Owl_distribution/Make/F/index.html b/owl/Owl_distribution/Make/F/index.html deleted file mode 100644 index 469b5ee89..000000000 --- a/owl/Owl_distribution/Make/F/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -F (owl.Owl_distribution.Make.F)

Module Make.F

type t = {
  1. dfnum : A.arr;
  2. dfden : A.arr;
}

Type definition of a specific distribution

val make : dfnum:A.arr -> dfden:A.arr -> t

Make a distribution of the given parameters.

val sample : t -> int -> A.arr

Sample a distribution of the given parameters.

val pdf : t -> A.arr -> A.arr

Probability density/mass function of the distribution.

val logpdf : t -> A.arr -> A.arr

Logarithm of the probability density/mass function of the distribution.

val cdf : t -> A.arr -> A.arr

Cumulative density/mass function of the distribution.

val logcdf : t -> A.arr -> A.arr

Logarithm of the cumulative density/mass function of the distribution.

val ppf : t -> A.arr -> A.arr

Percentile function of the distribution.

val sf : t -> A.arr -> A.arr

Survival function of the distribution.

val logsf : t -> A.arr -> A.arr

Logarithm of the survival function of the distribution.

val isf : t -> A.arr -> A.arr

Inverse survival function of the distribution.

\ No newline at end of file diff --git a/owl/Owl_distribution/Make/Gamma/index.html b/owl/Owl_distribution/Make/Gamma/index.html deleted file mode 100644 index 9fdce06b6..000000000 --- a/owl/Owl_distribution/Make/Gamma/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Gamma (owl.Owl_distribution.Make.Gamma)

Module Make.Gamma

type t = {
  1. shape : A.arr;
  2. scale : A.arr;
}

Type definition of a specific distribution

val make : shape:A.arr -> scale:A.arr -> t

Make a distribution of the given parameters.

val sample : t -> int -> A.arr

Sample a distribution of the given parameters.

val pdf : t -> A.arr -> A.arr

Probability density/mass function of the distribution.

val logpdf : t -> A.arr -> A.arr

Logarithm of the probability density/mass function of the distribution.

val cdf : t -> A.arr -> A.arr

Cumulative density/mass function of the distribution.

val logcdf : t -> A.arr -> A.arr

Logarithm of the cumulative density/mass function of the distribution.

val ppf : t -> A.arr -> A.arr

Percentile function of the distribution.

val sf : t -> A.arr -> A.arr

Survival function of the distribution.

val logsf : t -> A.arr -> A.arr

Logarithm of the survival function of the distribution.

val isf : t -> A.arr -> A.arr

Inverse survival function of the distribution.

\ No newline at end of file diff --git a/owl/Owl_distribution/Make/Gaussian/index.html b/owl/Owl_distribution/Make/Gaussian/index.html deleted file mode 100644 index da2d740ad..000000000 --- a/owl/Owl_distribution/Make/Gaussian/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Gaussian (owl.Owl_distribution.Make.Gaussian)

Module Make.Gaussian

type t = {
  1. mu : A.arr;
  2. sigma : A.arr;
}

Type definition of a specific distribution

val make : mu:A.arr -> sigma:A.arr -> t

Make a distribution of the given parameters.

val sample : t -> int -> A.arr

Sample a distribution of the given parameters.

val pdf : t -> A.arr -> A.arr

Probability density/mass function of the distribution.

val logpdf : t -> A.arr -> A.arr

Logarithm of the probability density/mass function of the distribution.

val cdf : t -> A.arr -> A.arr

Cumulative density/mass function of the distribution.

val logcdf : t -> A.arr -> A.arr

Logarithm of the cumulative density/mass function of the distribution.

val ppf : t -> A.arr -> A.arr

Percentile function of the distribution.

val sf : t -> A.arr -> A.arr

Survival function of the distribution.

val logsf : t -> A.arr -> A.arr

Logarithm of the survival function of the distribution.

val isf : t -> A.arr -> A.arr

Inverse survival function of the distribution.
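The Gaussian module above (like its sibling distributions) is produced by the Owl_distribution.Make functor. The sketch below shows the intended call pattern; the functor argument Owl_algodiff_primal_ops.D is an assumption about a compatible array module and is not stated on this page.

(* Hedged sketch of the record-based distribution API.  The functor
   argument is an assumption; any module satisfying the parameter
   signature documented below would serve. *)
module A = Owl_algodiff_primal_ops.D
module Dist = Owl_distribution.Make (A)

let () =
  let mu    = A.zeros [| 3 |] in
  let sigma = A.ones  [| 3 |] in
  let g = Dist.Gaussian.make ~mu ~sigma in
  let x = Dist.Gaussian.sample g 3 in      (* draw from the distribution *)
  let p = Dist.Gaussian.pdf g x in         (* density evaluated at x *)
  A.print x; A.print p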

\ No newline at end of file diff --git a/owl/Owl_distribution/Make/Gumbel1/index.html b/owl/Owl_distribution/Make/Gumbel1/index.html deleted file mode 100644 index 8e291d867..000000000 --- a/owl/Owl_distribution/Make/Gumbel1/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Gumbel1 (owl.Owl_distribution.Make.Gumbel1)

Module Make.Gumbel1

type t = {
  1. a : A.arr;
  2. b : A.arr;
}

Type definition of a specific distribution

val make : a:A.arr -> b:A.arr -> t

Make a distribution of the given parameters.

val sample : t -> int -> A.arr

Sample a distribution of the given parameters.

val pdf : t -> A.arr -> A.arr

Probability density/mass function of the distribution.

val logpdf : t -> A.arr -> A.arr

Logarithm of the probability density/mass function of the distribution.

val cdf : t -> A.arr -> A.arr

Cumulative density/mass function of the distribution.

val logcdf : t -> A.arr -> A.arr

Logarithm of the cumulative density/mass function of the distribution.

val ppf : t -> A.arr -> A.arr

Percentile function of the distribution.

val sf : t -> A.arr -> A.arr

Survival function of the distribution.

val logsf : t -> A.arr -> A.arr

Logarithm of the survival function of the distribution.

val isf : t -> A.arr -> A.arr

Inverse survival function of the distribution.

\ No newline at end of file diff --git a/owl/Owl_distribution/Make/Gumbel2/index.html b/owl/Owl_distribution/Make/Gumbel2/index.html deleted file mode 100644 index 7e1089c27..000000000 --- a/owl/Owl_distribution/Make/Gumbel2/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Gumbel2 (owl.Owl_distribution.Make.Gumbel2)

Module Make.Gumbel2

type t = {
  1. a : A.arr;
  2. b : A.arr;
}

Type definition of a specific distribution

val make : a:A.arr -> b:A.arr -> t

Make a distribution of the given parameters.

val sample : t -> int -> A.arr

Sample a distribution of the given parameters.

val pdf : t -> A.arr -> A.arr

Probability density/mass function of the distribution.

val logpdf : t -> A.arr -> A.arr

Logarithm of the probability density/mass function of the distribution.

val cdf : t -> A.arr -> A.arr

Cumulative density/mass function of the distribution.

val logcdf : t -> A.arr -> A.arr

Logarithm of the cumulative density/mass function of the distribution.

val ppf : t -> A.arr -> A.arr

Percentile function of the distribution.

val sf : t -> A.arr -> A.arr

Survival function of the distribution.

val logsf : t -> A.arr -> A.arr

Logarithm of the survival function of the distribution.

val isf : t -> A.arr -> A.arr

Inverse survival function of the distribution.

\ No newline at end of file diff --git a/owl/Owl_distribution/Make/Laplace/index.html b/owl/Owl_distribution/Make/Laplace/index.html deleted file mode 100644 index 1dfcc12fa..000000000 --- a/owl/Owl_distribution/Make/Laplace/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Laplace (owl.Owl_distribution.Make.Laplace)

Module Make.Laplace

type t = {
  1. loc : A.arr;
  2. scale : A.arr;
}

Type definition of a specific distribution

val make : loc:A.arr -> scale:A.arr -> t

Make a distribution of the given parameters.

val sample : t -> int -> A.arr

Sample a distribution of the given parameters.

val pdf : t -> A.arr -> A.arr

Probability density/mass function of the distribution.

val logpdf : t -> A.arr -> A.arr

Logarithm of the probability density/mass function of the distribution.

val cdf : t -> A.arr -> A.arr

Cumulative density/mass function of the distribution.

val logcdf : t -> A.arr -> A.arr

Logarithm of the cumulative density/mass function of the distribution.

val ppf : t -> A.arr -> A.arr

Percentile function of the distribution.

val sf : t -> A.arr -> A.arr

Survival function of the distribution.

val logsf : t -> A.arr -> A.arr

Logarithm of the survival function of the distribution.

val isf : t -> A.arr -> A.arr

Inverse survival function of the distribution.

\ No newline at end of file diff --git a/owl/Owl_distribution/Make/Logistic/index.html b/owl/Owl_distribution/Make/Logistic/index.html deleted file mode 100644 index 8951fd1ff..000000000 --- a/owl/Owl_distribution/Make/Logistic/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Logistic (owl.Owl_distribution.Make.Logistic)

Module Make.Logistic

type t = {
  1. loc : A.arr;
  2. scale : A.arr;
}

Type definition of a specific distribution

val make : loc:A.arr -> scale:A.arr -> t

Make a distribution of the given parameters.

val sample : t -> int -> A.arr

Sample a distribution of the given parameters.

val pdf : t -> A.arr -> A.arr

Probability density/mass function of the distribution.

val logpdf : t -> A.arr -> A.arr

Logarithm of the probability density/mass function of the distribution.

val cdf : t -> A.arr -> A.arr

Cumulative density/mass function of the distribution.

val logcdf : t -> A.arr -> A.arr

Logarithm of the cumulative density/mass function of the distribution.

val ppf : t -> A.arr -> A.arr

Percentile function of the distribution.

val sf : t -> A.arr -> A.arr

Survival function of the distribution.

val logsf : t -> A.arr -> A.arr

Logarithm of the survival function of the distribution.

val isf : t -> A.arr -> A.arr

Inverse survival function of the distribution.

\ No newline at end of file diff --git a/owl/Owl_distribution/Make/Lognormal/index.html b/owl/Owl_distribution/Make/Lognormal/index.html deleted file mode 100644 index d9d52e093..000000000 --- a/owl/Owl_distribution/Make/Lognormal/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Lognormal (owl.Owl_distribution.Make.Lognormal)

Module Make.Lognormal

type t = {
  1. mu : A.arr;
  2. sigma : A.arr;
}

Type definition of a specific distribution

val make : mu:A.arr -> sigma:A.arr -> t

Make a distribution of the given parameters.

val sample : t -> int -> A.arr

Sample a distribution of the given parameters.

val pdf : t -> A.arr -> A.arr

Probability density/mass function of the distribution.

val logpdf : t -> A.arr -> A.arr

Logarithm of the probability density/mass function of the distribution.

val cdf : t -> A.arr -> A.arr

Cumulative density/mass function of the distribution.

val logcdf : t -> A.arr -> A.arr

Logarithm of the cumulative density/mass function of the distribution.

val ppf : t -> A.arr -> A.arr

Percentile function of the distribution.

val sf : t -> A.arr -> A.arr

Survival function of the distribution.

val logsf : t -> A.arr -> A.arr

Logarithm of the survival function of the distribution.

val isf : t -> A.arr -> A.arr

Inverse survival function of the distribution.

\ No newline at end of file diff --git a/owl/Owl_distribution/Make/Lomax/index.html b/owl/Owl_distribution/Make/Lomax/index.html deleted file mode 100644 index c38457ff6..000000000 --- a/owl/Owl_distribution/Make/Lomax/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Lomax (owl.Owl_distribution.Make.Lomax)

Module Make.Lomax

type t = {
  1. shape : A.arr;
  2. scale : A.arr;
}

Type definition of a specific distribution

val make : shape:A.arr -> scale:A.arr -> t

Make a distribution of the given parameters.

val sample : t -> int -> A.arr

Sample a distribution of the given parameters.

val pdf : t -> A.arr -> A.arr

Probability density/mass function of the distribution.

val logpdf : t -> A.arr -> A.arr

Logarithm of the probability density/mass function of the distribution.

val cdf : t -> A.arr -> A.arr

Cumulative density/mass function of the distribution.

val logcdf : t -> A.arr -> A.arr

Logarithm of the cumulative density/mass function of the distribution.

val ppf : t -> A.arr -> A.arr

Percentile function of the distribution.

val sf : t -> A.arr -> A.arr

Survival function of the distribution.

val logsf : t -> A.arr -> A.arr

Logarithm of the survival function of the distribution.

val isf : t -> A.arr -> A.arr

Inverse survival function of the distribution.

\ No newline at end of file diff --git a/owl/Owl_distribution/Make/Poisson/index.html b/owl/Owl_distribution/Make/Poisson/index.html deleted file mode 100644 index a71bb42d3..000000000 --- a/owl/Owl_distribution/Make/Poisson/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Poisson (owl.Owl_distribution.Make.Poisson)

Module Make.Poisson

type t = {
  1. mu : A.arr;
}

Type definition of a specific distribution

val make : mu:A.arr -> t

Make a distribution of the given parameters.

val sample : t -> int -> A.arr

Sample a distribution of the given parameters.

\ No newline at end of file diff --git a/owl/Owl_distribution/Make/Rayleigh/index.html b/owl/Owl_distribution/Make/Rayleigh/index.html deleted file mode 100644 index 3708139cb..000000000 --- a/owl/Owl_distribution/Make/Rayleigh/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Rayleigh (owl.Owl_distribution.Make.Rayleigh)

Module Make.Rayleigh

type t = {
  1. sigma : A.arr;
}

Type definition of a specific distribution

val make : sigma:A.arr -> t

Make a distribution of the given parameters.

val sample : t -> int -> A.arr

Sample a distribution of the given parameters.

val pdf : t -> A.arr -> A.arr

Probability density/mass function of the distribution.

val logpdf : t -> A.arr -> A.arr

Logarithm of the probability density/mass function of the distribution.

val cdf : t -> A.arr -> A.arr

Cumulative density/mass function of the distribution.

val logcdf : t -> A.arr -> A.arr

Logarithm of the cumulative density/mass function of the distribution.

val ppf : t -> A.arr -> A.arr

Percentile function of the distribution.

val sf : t -> A.arr -> A.arr

Survival function of the distribution.

val logsf : t -> A.arr -> A.arr

Logarithm of the survival function of the distribution.

val isf : t -> A.arr -> A.arr

Inverse survival function of the distribution.

\ No newline at end of file diff --git a/owl/Owl_distribution/Make/Uniform/index.html b/owl/Owl_distribution/Make/Uniform/index.html deleted file mode 100644 index 6d8cda225..000000000 --- a/owl/Owl_distribution/Make/Uniform/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Uniform (owl.Owl_distribution.Make.Uniform)

Module Make.Uniform

type t = {
  1. a : A.arr;
  2. b : A.arr;
}

Type definition of a specific distribution

val make : a:A.arr -> b:A.arr -> t

Make a distribution of the given parameters.

val sample : t -> int -> A.arr

Sample a distribution of the given parameters.

val pdf : t -> A.arr -> A.arr

Probability density/mass function of the distribution.

val logpdf : t -> A.arr -> A.arr

Logarithm of the probability density/mass function of the distribution.

val cdf : t -> A.arr -> A.arr

Cumulative density/mass function of the distribution.

val logcdf : t -> A.arr -> A.arr

Logarithm of the cumulative density/mass function of the distribution.

val ppf : t -> A.arr -> A.arr

Percentile function of the distribution.

val sf : t -> A.arr -> A.arr

Survival function of the distribution.

val logsf : t -> A.arr -> A.arr

Logarithm of the survival function of the distribution.

val isf : t -> A.arr -> A.arr

Inverse survival function of the distribution.

\ No newline at end of file diff --git a/owl/Owl_distribution/Make/Weibull/index.html b/owl/Owl_distribution/Make/Weibull/index.html deleted file mode 100644 index dcea1a663..000000000 --- a/owl/Owl_distribution/Make/Weibull/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Weibull (owl.Owl_distribution.Make.Weibull)

Module Make.Weibull

type t = {
  1. shape : A.arr;
  2. scale : A.arr;
}

Type definition of a specific distribution

val make : shape:A.arr -> scale:A.arr -> t

Make a distribution of the given parameters.

val sample : t -> int -> A.arr

Sample a distribution of the given parameters.

val pdf : t -> A.arr -> A.arr

Probability density/mass function of the distribution.

val logpdf : t -> A.arr -> A.arr

Logarithm of the probability density/mass function of the distribution.

val cdf : t -> A.arr -> A.arr

Cumulative density/mass function of the distribution.

val logcdf : t -> A.arr -> A.arr

Logarithm of the cumulative density/mass function of the distribution.

val ppf : t -> A.arr -> A.arr

Percentile function of the distribution.

val sf : t -> A.arr -> A.arr

Survival function of the distribution.

val logsf : t -> A.arr -> A.arr

Logarithm of the survival function of the distribution.

val isf : t -> A.arr -> A.arr

Inverse survival function of the distribution.

\ No newline at end of file diff --git a/owl/Owl_distribution/Make/argument-1-A/Linalg/index.html b/owl/Owl_distribution/Make/argument-1-A/Linalg/index.html deleted file mode 100644 index b625f9b2c..000000000 --- a/owl/Owl_distribution/Make/argument-1-A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl.Owl_distribution.Make.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl/Owl_distribution/Make/argument-1-A/Mat/index.html b/owl/Owl_distribution/Make/argument-1-A/Mat/index.html deleted file mode 100644 index 3daeaeb5c..000000000 --- a/owl/Owl_distribution/Make/argument-1-A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_distribution.Make.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl/Owl_distribution/Make/argument-1-A/Scalar/index.html b/owl/Owl_distribution/Make/argument-1-A/Scalar/index.html deleted file mode 100644 index 3db0b6574..000000000 --- a/owl/Owl_distribution/Make/argument-1-A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl.Owl_distribution.Make.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl/Owl_distribution/Make/argument-1-A/index.html b/owl/Owl_distribution/Make/argument-1-A/index.html deleted file mode 100644 index 24855b14e..000000000 --- a/owl/Owl_distribution/Make/argument-1-A/index.html +++ /dev/null @@ -1,379 +0,0 @@ - -A (owl.Owl_distribution.Make.A)

Parameter Make.A

include Owl_types_stats_dist.Sig
include Owl_types_ndarray_mutable.Sig
include Owl_types_ndarray_algodiff.Sig
include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
val create_ : out:arr -> elt -> unit
val uniform_ : ?a:elt -> ?b:elt -> out:arr -> unit
val gaussian_ : ?mu:elt -> ?sigma:elt -> out:arr -> unit
val sequential_ : ?a:elt -> ?step:elt -> out:arr -> unit
val bernoulli_ : ?p:elt -> out:arr -> unit
val zeros_ : out:arr -> unit
val ones_ : out:arr -> unit
val one_hot_ : out:arr -> int -> arr -> unit
val get_slice_ : out:arr -> int list list -> arr -> unit
val set_slice_ : out:arr -> int list list -> arr -> arr -> unit
val sub_left : arr -> int -> int -> arr
val reshape_ : out:arr -> arr -> unit
val reverse_ : out:arr -> arr -> unit
val transpose_ : out:arr -> ?axis:int array -> arr -> unit
val repeat_ : out:arr -> arr -> int array -> unit
val tile_ : out:arr -> arr -> int array -> unit
val pad_ : out:arr -> ?v:elt -> int list list -> arr -> unit
val hypot : arr -> arr -> arr
val fmod : arr -> arr -> arr
val min2 : arr -> arr -> arr
val max2 : arr -> arr -> arr
val add_ : ?out:arr -> arr -> arr -> unit
val sub_ : ?out:arr -> arr -> arr -> unit
val mul_ : ?out:arr -> arr -> arr -> unit
val div_ : ?out:arr -> arr -> arr -> unit
val pow_ : ?out:arr -> arr -> arr -> unit
val atan2_ : ?out:arr -> arr -> arr -> unit
val hypot_ : ?out:arr -> arr -> arr -> unit
val fmod_ : ?out:arr -> arr -> arr -> unit
val min2_ : ?out:arr -> arr -> arr -> unit
val max2_ : ?out:arr -> arr -> arr -> unit
val add_scalar_ : ?out:arr -> arr -> elt -> unit
val sub_scalar_ : ?out:arr -> arr -> elt -> unit
val mul_scalar_ : ?out:arr -> arr -> elt -> unit
val div_scalar_ : ?out:arr -> arr -> elt -> unit
val pow_scalar_ : ?out:arr -> arr -> elt -> unit
val atan2_scalar_ : ?out:arr -> arr -> elt -> unit
val fmod_scalar_ : ?out:arr -> arr -> elt -> unit
val scalar_add_ : ?out:arr -> elt -> arr -> unit
val scalar_sub_ : ?out:arr -> elt -> arr -> unit
val scalar_mul_ : ?out:arr -> elt -> arr -> unit
val scalar_div_ : ?out:arr -> elt -> arr -> unit
val scalar_pow_ : ?out:arr -> elt -> arr -> unit
val scalar_atan2_ : ?out:arr -> elt -> arr -> unit
val scalar_fmod_ : ?out:arr -> elt -> arr -> unit
val fma_ : ?out:arr -> arr -> arr -> arr -> unit
val clip_by_value_ : ?out:arr -> ?amin:elt -> ?amax:elt -> arr -> unit
val clip_by_l2norm_ : ?out:arr -> elt -> arr -> unit
val dot_ : ?transa:bool -> ?transb:bool -> ?alpha:elt -> ?beta:elt -> c:arr -> arr -> arr -> unit
val abs_ : ?out:arr -> arr -> unit
val neg_ : ?out:arr -> arr -> unit
val conj_ : ?out:arr -> arr -> unit
val reci_ : ?out:arr -> arr -> unit
val signum_ : ?out:arr -> arr -> unit
val sqr_ : ?out:arr -> arr -> unit
val sqrt_ : ?out:arr -> arr -> unit
val cbrt_ : ?out:arr -> arr -> unit
val exp_ : ?out:arr -> arr -> unit
val exp2_ : ?out:arr -> arr -> unit
val exp10_ : ?out:arr -> arr -> unit
val expm1_ : ?out:arr -> arr -> unit
val log_ : ?out:arr -> arr -> unit
val log2_ : ?out:arr -> arr -> unit
val log10_ : ?out:arr -> arr -> unit
val log1p_ : ?out:arr -> arr -> unit
val sin_ : ?out:arr -> arr -> unit
val cos_ : ?out:arr -> arr -> unit
val tan_ : ?out:arr -> arr -> unit
val asin_ : ?out:arr -> arr -> unit
val acos_ : ?out:arr -> arr -> unit
val atan_ : ?out:arr -> arr -> unit
val sinh_ : ?out:arr -> arr -> unit
val cosh_ : ?out:arr -> arr -> unit
val tanh_ : ?out:arr -> arr -> unit
val asinh_ : ?out:arr -> arr -> unit
val acosh_ : ?out:arr -> arr -> unit
val atanh_ : ?out:arr -> arr -> unit
val floor_ : ?out:arr -> arr -> unit
val ceil_ : ?out:arr -> arr -> unit
val round_ : ?out:arr -> arr -> unit
val trunc_ : ?out:arr -> arr -> unit
val fix_ : ?out:arr -> arr -> unit
val erf_ : ?out:arr -> arr -> unit
val erfc_ : ?out:arr -> arr -> unit
val relu_ : ?out:arr -> arr -> unit
val softplus_ : ?out:arr -> arr -> unit
val softsign_ : ?out:arr -> arr -> unit
val softmax_ : ?out:arr -> ?axis:int -> arr -> unit
val sigmoid_ : ?out:arr -> arr -> unit
val sum_ : out:arr -> axis:int -> arr -> unit
val min_ : out:arr -> axis:int -> arr -> unit
val max_ : out:arr -> axis:int -> arr -> unit
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val prod : ?axis:int -> ?keep_dims:bool -> arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val mean : ?axis:int -> ?keep_dims:bool -> arr -> arr
val var : ?axis:int -> ?keep_dims:bool -> arr -> arr
val std : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l2norm : ?axis:int -> ?keep_dims:bool -> arr -> arr
val cumsum_ : ?out:arr -> ?axis:int -> arr -> unit
val cumprod_ : ?out:arr -> ?axis:int -> arr -> unit
val cummin_ : ?out:arr -> ?axis:int -> arr -> unit
val cummax_ : ?out:arr -> ?axis:int -> arr -> unit
val dropout_ : ?out:arr -> ?rate:float -> arr -> unit
val prod' : arr -> elt
val mean' : arr -> elt
val var' : arr -> elt
val std' : arr -> elt
val elt_equal_ : ?out:arr -> arr -> arr -> unit
val elt_not_equal_ : ?out:arr -> arr -> arr -> unit
val elt_less_ : ?out:arr -> arr -> arr -> unit
val elt_greater_ : ?out:arr -> arr -> arr -> unit
val elt_less_equal_ : ?out:arr -> arr -> arr -> unit
val elt_greater_equal_ : ?out:arr -> arr -> arr -> unit
val elt_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_not_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_less_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val elt_greater_equal_scalar_ : ?out:arr -> arr -> elt -> unit
val conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val dilated_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val dilated_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> unit
val transpose_conv1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val transpose_conv3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> arr -> int array -> unit
val max_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val max_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool1d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool2d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val avg_pool3d_ : out:arr -> ?padding:Owl_types_common.padding -> arr -> int array -> int array -> unit
val upsampling2d_ : out:arr -> arr -> int array -> unit
val conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val dilated_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val dilated_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> int array -> arr -> unit
val transpose_conv1d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv1d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv2d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_input_ : out:arr -> arr -> arr -> int array -> arr -> unit
val transpose_conv3d_backward_kernel_ : out:arr -> arr -> arr -> int array -> arr -> unit
val max_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val max_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool1d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool2d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val avg_pool3d_backward_ : out:arr -> Owl_types_common.padding -> arr -> int array -> int array -> arr -> unit
val upsampling2d_backward_ : out:arr -> arr -> int array -> arr -> unit
val fused_adagrad_ : ?out:arr -> rate:float -> eps:float -> arr -> unit
val uniform_rvs : a:arr -> b:arr -> n:int -> arr
val uniform_pdf : a:arr -> b:arr -> arr -> arr
val uniform_logpdf : a:arr -> b:arr -> arr -> arr
val uniform_cdf : a:arr -> b:arr -> arr -> arr
val uniform_logcdf : a:arr -> b:arr -> arr -> arr
val uniform_ppf : a:arr -> b:arr -> arr -> arr
val uniform_sf : a:arr -> b:arr -> arr -> arr
val uniform_logsf : a:arr -> b:arr -> arr -> arr
val uniform_isf : a:arr -> b:arr -> arr -> arr
val gaussian_rvs : mu:arr -> sigma:arr -> n:int -> arr
val gaussian_pdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_logpdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_cdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_logcdf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_ppf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_sf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_logsf : mu:arr -> sigma:arr -> arr -> arr
val gaussian_isf : mu:arr -> sigma:arr -> arr -> arr
val exponential_rvs : lambda:arr -> n:int -> arr
val exponential_pdf : lambda:arr -> arr -> arr
val exponential_logpdf : lambda:arr -> arr -> arr
val exponential_cdf : lambda:arr -> arr -> arr
val exponential_logcdf : lambda:arr -> arr -> arr
val exponential_ppf : lambda:arr -> arr -> arr
val exponential_sf : lambda:arr -> arr -> arr
val exponential_logsf : lambda:arr -> arr -> arr
val exponential_isf : lambda:arr -> arr -> arr
val poisson_rvs : mu:arr -> n:int -> arr
val gamma_rvs : shape:arr -> scale:arr -> n:int -> arr
val gamma_pdf : shape:arr -> scale:arr -> arr -> arr
val gamma_logpdf : shape:arr -> scale:arr -> arr -> arr
val gamma_cdf : shape:arr -> scale:arr -> arr -> arr
val gamma_logcdf : shape:arr -> scale:arr -> arr -> arr
val gamma_ppf : shape:arr -> scale:arr -> arr -> arr
val gamma_sf : shape:arr -> scale:arr -> arr -> arr
val gamma_logsf : shape:arr -> scale:arr -> arr -> arr
val gamma_isf : shape:arr -> scale:arr -> arr -> arr
val beta_rvs : a:arr -> b:arr -> n:int -> arr
val beta_pdf : a:arr -> b:arr -> arr -> arr
val beta_logpdf : a:arr -> b:arr -> arr -> arr
val beta_cdf : a:arr -> b:arr -> arr -> arr
val beta_logcdf : a:arr -> b:arr -> arr -> arr
val beta_ppf : a:arr -> b:arr -> arr -> arr
val beta_sf : a:arr -> b:arr -> arr -> arr
val beta_logsf : a:arr -> b:arr -> arr -> arr
val beta_isf : a:arr -> b:arr -> arr -> arr
val chi2_rvs : df:arr -> n:int -> arr
val chi2_pdf : df:arr -> arr -> arr
val chi2_logpdf : df:arr -> arr -> arr
val chi2_cdf : df:arr -> arr -> arr
val chi2_logcdf : df:arr -> arr -> arr
val chi2_ppf : df:arr -> arr -> arr
val chi2_sf : df:arr -> arr -> arr
val chi2_logsf : df:arr -> arr -> arr
val chi2_isf : df:arr -> arr -> arr
val f_rvs : dfnum:arr -> dfden:arr -> n:int -> arr
val f_pdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_logpdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_cdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_logcdf : dfnum:arr -> dfden:arr -> arr -> arr
val f_ppf : dfnum:arr -> dfden:arr -> arr -> arr
val f_sf : dfnum:arr -> dfden:arr -> arr -> arr
val f_logsf : dfnum:arr -> dfden:arr -> arr -> arr
val f_isf : dfnum:arr -> dfden:arr -> arr -> arr
val cauchy_rvs : loc:arr -> scale:arr -> n:int -> arr
val cauchy_pdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_logpdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_cdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_logcdf : loc:arr -> scale:arr -> arr -> arr
val cauchy_ppf : loc:arr -> scale:arr -> arr -> arr
val cauchy_sf : loc:arr -> scale:arr -> arr -> arr
val cauchy_logsf : loc:arr -> scale:arr -> arr -> arr
val cauchy_isf : loc:arr -> scale:arr -> arr -> arr
val lomax_rvs : shape:arr -> scale:arr -> n:int -> arr
val lomax_pdf : shape:arr -> scale:arr -> arr -> arr
val lomax_logpdf : shape:arr -> scale:arr -> arr -> arr
val lomax_cdf : shape:arr -> scale:arr -> arr -> arr
val lomax_logcdf : shape:arr -> scale:arr -> arr -> arr
val lomax_ppf : shape:arr -> scale:arr -> arr -> arr
val lomax_sf : shape:arr -> scale:arr -> arr -> arr
val lomax_logsf : shape:arr -> scale:arr -> arr -> arr
val lomax_isf : shape:arr -> scale:arr -> arr -> arr
val weibull_rvs : shape:arr -> scale:arr -> n:int -> arr
val weibull_pdf : shape:arr -> scale:arr -> arr -> arr
val weibull_logpdf : shape:arr -> scale:arr -> arr -> arr
val weibull_cdf : shape:arr -> scale:arr -> arr -> arr
val weibull_logcdf : shape:arr -> scale:arr -> arr -> arr
val weibull_ppf : shape:arr -> scale:arr -> arr -> arr
val weibull_sf : shape:arr -> scale:arr -> arr -> arr
val weibull_logsf : shape:arr -> scale:arr -> arr -> arr
val weibull_isf : shape:arr -> scale:arr -> arr -> arr
val laplace_rvs : loc:arr -> scale:arr -> n:int -> arr
val laplace_pdf : loc:arr -> scale:arr -> arr -> arr
val laplace_logpdf : loc:arr -> scale:arr -> arr -> arr
val laplace_cdf : loc:arr -> scale:arr -> arr -> arr
val laplace_logcdf : loc:arr -> scale:arr -> arr -> arr
val laplace_ppf : loc:arr -> scale:arr -> arr -> arr
val laplace_sf : loc:arr -> scale:arr -> arr -> arr
val laplace_logsf : loc:arr -> scale:arr -> arr -> arr
val laplace_isf : loc:arr -> scale:arr -> arr -> arr
val gumbel1_rvs : a:arr -> b:arr -> n:int -> arr
val gumbel1_pdf : a:arr -> b:arr -> arr -> arr
val gumbel1_logpdf : a:arr -> b:arr -> arr -> arr
val gumbel1_cdf : a:arr -> b:arr -> arr -> arr
val gumbel1_logcdf : a:arr -> b:arr -> arr -> arr
val gumbel1_ppf : a:arr -> b:arr -> arr -> arr
val gumbel1_sf : a:arr -> b:arr -> arr -> arr
val gumbel1_logsf : a:arr -> b:arr -> arr -> arr
val gumbel1_isf : a:arr -> b:arr -> arr -> arr
val gumbel2_rvs : a:arr -> b:arr -> n:int -> arr
val gumbel2_pdf : a:arr -> b:arr -> arr -> arr
val gumbel2_logpdf : a:arr -> b:arr -> arr -> arr
val gumbel2_cdf : a:arr -> b:arr -> arr -> arr
val gumbel2_logcdf : a:arr -> b:arr -> arr -> arr
val gumbel2_ppf : a:arr -> b:arr -> arr -> arr
val gumbel2_sf : a:arr -> b:arr -> arr -> arr
val gumbel2_logsf : a:arr -> b:arr -> arr -> arr
val gumbel2_isf : a:arr -> b:arr -> arr -> arr
val logistic_rvs : loc:arr -> scale:arr -> n:int -> arr
val logistic_pdf : loc:arr -> scale:arr -> arr -> arr
val logistic_logpdf : loc:arr -> scale:arr -> arr -> arr
val logistic_cdf : loc:arr -> scale:arr -> arr -> arr
val logistic_logcdf : loc:arr -> scale:arr -> arr -> arr
val logistic_ppf : loc:arr -> scale:arr -> arr -> arr
val logistic_sf : loc:arr -> scale:arr -> arr -> arr
val logistic_logsf : loc:arr -> scale:arr -> arr -> arr
val logistic_isf : loc:arr -> scale:arr -> arr -> arr
val lognormal_rvs : mu:arr -> sigma:arr -> n:int -> arr
val lognormal_pdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_logpdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_cdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_logcdf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_ppf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_sf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_logsf : mu:arr -> sigma:arr -> arr -> arr
val lognormal_isf : mu:arr -> sigma:arr -> arr -> arr
val rayleigh_rvs : sigma:arr -> n:int -> arr
val rayleigh_pdf : sigma:arr -> arr -> arr
val rayleigh_logpdf : sigma:arr -> arr -> arr
val rayleigh_cdf : sigma:arr -> arr -> arr
val rayleigh_logcdf : sigma:arr -> arr -> arr
val rayleigh_ppf : sigma:arr -> arr -> arr
val rayleigh_sf : sigma:arr -> arr -> arr
val rayleigh_logsf : sigma:arr -> arr -> arr
val rayleigh_isf : sigma:arr -> arr -> arr
\ No newline at end of file diff --git a/owl/Owl_distribution/Make/index.html b/owl/Owl_distribution/Make/index.html deleted file mode 100644 index 572391259..000000000 --- a/owl/Owl_distribution/Make/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Make (owl.Owl_distribution.Make)

Module Owl_distribution.Make

Parameters

Signature

Uniform distribution
module Uniform : sig ... end
Gaussian distribution
module Gaussian : sig ... end
Exponential distribution
module Exponential : sig ... end
Poisson distribution
module Poisson : sig ... end
Gamma distribution
module Gamma : sig ... end
Beta distribution
module Beta : sig ... end
Chi2 distribution
module Chi2 : sig ... end
F distribution
module F : sig ... end
Cauchy distribution
module Cauchy : sig ... end
Lomax distribution
module Lomax : sig ... end
Weibull distribution
module Weibull : sig ... end
Laplace distribution
module Laplace : sig ... end
Gumbel1 distribution
module Gumbel1 : sig ... end
Gumbel2 distribution
module Gumbel2 : sig ... end
Logistic distribution
module Logistic : sig ... end
Lognormal distribution
module Lognormal : sig ... end
Rayleigh distribution
module Rayleigh : sig ... end
Type definition
type dist =
  1. | Uniform of Uniform.t
  2. | Gaussian of Gaussian.t
  3. | Exponential of Exponential.t
  4. | Gamma of Gamma.t
  5. | Beta of Beta.t
  6. | Chi2 of Chi2.t
  7. | F of F.t
  8. | Cauchy of Cauchy.t
  9. | Lomax of Lomax.t
  10. | Weibull of Weibull.t
  11. | Laplace of Laplace.t
  12. | Gumbel1 of Gumbel1.t
  13. | Gumbel2 of Gumbel2.t
  14. | Logistic of Logistic.t
  15. | Lognormal of Lognormal.t
  16. | Rayleigh of Rayleigh.t
    (*

    Type definition of various distributions

    *)
Core functions
val sample : dist -> int -> A.arr

Sample a given distribution of the given parameters.

val prob : dist -> A.arr -> A.arr

Probability density/mass function of a given distribution.

val log_prob : dist -> A.arr -> A.arr

Logarithm of the probability density/mass function of a given distribution.

val cdf : dist -> A.arr -> A.arr

Cumulative density/mass function of the distribution.

val logcdf : dist -> A.arr -> A.arr

Logarithm of the cumulative density/mass function of the distribution.
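A small sketch of how the dist variant and these core functions fit together is given below. The wrapper functor, the names Demo/M/demo, the parameter signature Owl_types_stats_dist.Sig, and the mu/sigma labels (taken by analogy with gaussian_rvs above) are assumptions for illustration only.

.. code-block:: ocaml

(* Hypothetical sketch: a dist value tags one concrete distribution, and the
   core functions dispatch on that tag. *)
module Demo (A : Owl_types_stats_dist.Sig) = struct
  module M = Owl_distribution.Make (A)

  let demo ~mu ~sigma x =
    let d = M.Gaussian (M.Gaussian.make ~mu ~sigma) in
    let s = M.sample d 100 in      (* 100 draws from the Gaussian *)
    let p = M.prob d x in          (* density evaluated at x *)
    let lp = M.log_prob d x in     (* log-density at x *)
    s, p, lp
end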

\ No newline at end of file diff --git a/owl/Owl_distribution_common/.dummy b/owl/Owl_distribution_common/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_distribution_generic/.dummy b/owl/Owl_distribution_generic/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_fft/.dummy b/owl/Owl_fft/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_fft/D/index.html b/owl/Owl_fft/D/index.html deleted file mode 100644 index a91527c4c..000000000 --- a/owl/Owl_fft/D/index.html +++ /dev/null @@ -1,19 +0,0 @@ - -D (owl.Owl_fft.D)

Module Owl_fft.D

include module type of struct include Owl_fft_d end
val fft : ?axis:int -> (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt) Owl_dense_ndarray_generic.t -> (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt) Owl_dense_ndarray_generic.t
val ifft : ?axis:int -> (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt) Owl_dense_ndarray_generic.t -> (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt) Owl_dense_ndarray_generic.t
val rfft : ?axis:int -> (float, Stdlib.Bigarray.float64_elt) Owl_dense_ndarray_generic.t -> (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt) Owl_dense_ndarray_generic.t
val irfft : ?axis:int -> ?n:int -> (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt) Owl_dense_ndarray_generic.t -> (float, Stdlib.Bigarray.float64_elt) Owl_dense_ndarray_generic.t
val fft2 : (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt) Owl_dense_ndarray_generic.t -> (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt) Owl_dense_ndarray_generic.t
val ifft2 : (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt) Owl_dense_ndarray_generic.t -> (Stdlib.Complex.t, Stdlib.Bigarray.complex64_elt) Owl_dense_ndarray_generic.t
\ No newline at end of file diff --git a/owl/Owl_fft/Generic/index.html b/owl/Owl_fft/Generic/index.html deleted file mode 100644 index f00466ab8..000000000 --- a/owl/Owl_fft/Generic/index.html +++ /dev/null @@ -1,21 +0,0 @@ - -Generic (owl.Owl_fft.Generic)

Module Owl_fft.Generic

include module type of struct include Owl_fft_generic end
Basic functions
val fft : ?axis:int -> (Stdlib.Complex.t, 'a) Owl_dense_ndarray_generic.t -> (Stdlib.Complex.t, 'a) Owl_dense_ndarray_generic.t

fft ~axis x performs 1-dimensional FFT on a complex input. axis is the highest dimension if not specified. The return is not scaled.

val ifft : ?axis:int -> (Stdlib.Complex.t, 'a) Owl_dense_ndarray_generic.t -> (Stdlib.Complex.t, 'a) Owl_dense_ndarray_generic.t

ifft ~axis x performs inverse 1-dimensional FFT on a complex input. axis is the highest dimension by default.

val rfft : ?axis:int -> otyp:(Stdlib.Complex.t, 'a) Owl_dense_ndarray_generic.kind -> (float, 'b) Owl_dense_ndarray_generic.t -> (Stdlib.Complex.t, 'a) Owl_dense_ndarray_generic.t

rfft ~axis ~otyp x performs 1-dimensional FFT on real input along the given axis. otyp specifies the output type; its precision must be consistent with that of the input x. You can skip this parameter by using a submodule with a specific precision such as Owl.Fft.S or Owl.Fft.D.

val irfft : ?axis:int -> ?n:int -> otyp:(float, 'a) Owl_dense_ndarray_generic.kind -> (Stdlib.Complex.t, 'b) Owl_dense_ndarray_generic.t -> (float, 'a) Owl_dense_ndarray_generic.t

irfft ~axis ~n x is the inverse function of rfft. Note that the n parameter is used to specify the size of the output.

val fft2 : (Stdlib.Complex.t, 'a) Owl_dense_ndarray_generic.t -> (Stdlib.Complex.t, 'a) Owl_dense_ndarray_generic.t

fft2 x performs 2-dimensional FFT on a complex input. The return is not scaled.

val ifft2 : (Stdlib.Complex.t, 'a) Owl_dense_ndarray_generic.t -> (Stdlib.Complex.t, 'a) Owl_dense_ndarray_generic.t

ifft2 x performs inverse 2-dimensional FFT on a complex input.
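As a quick illustration, the sketch below round-trips a small complex array with the double-precision submodule. It assumes Owl.Fft.D and Owl.Dense.Ndarray.Z are available as in the rest of Owl, and that ifft applies the usual 1/n normalisation so the round trip recovers the input up to numerical error.

.. code-block:: ocaml

(* Sketch: forward FFT followed by inverse FFT should recover the input,
   assuming ifft applies the usual 1/n normalisation. *)
let () =
  let open Owl in
  let x = Dense.Ndarray.Z.sequential [| 8 |] in   (* complex64 input *)
  let y = Fft.D.fft x in                          (* unscaled forward transform *)
  let x' = Fft.D.ifft y in                        (* inverse transform *)
  let err = Dense.Ndarray.Z.(l1norm' (sub x x')) in
  Printf.printf "round-trip error = %g\n" err.Complex.re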

\ No newline at end of file diff --git a/owl/Owl_fft/S/index.html b/owl/Owl_fft/S/index.html deleted file mode 100644 index c43a71d4c..000000000 --- a/owl/Owl_fft/S/index.html +++ /dev/null @@ -1,19 +0,0 @@ - -S (owl.Owl_fft.S)

Module Owl_fft.S

include module type of struct include Owl_fft_s end
val fft : ?axis:int -> (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt) Owl_dense_ndarray_generic.t -> (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt) Owl_dense_ndarray_generic.t
val ifft : ?axis:int -> (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt) Owl_dense_ndarray_generic.t -> (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt) Owl_dense_ndarray_generic.t
val rfft : ?axis:int -> (float, Stdlib.Bigarray.float32_elt) Owl_dense_ndarray_generic.t -> (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt) Owl_dense_ndarray_generic.t
val irfft : ?axis:int -> ?n:int -> (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt) Owl_dense_ndarray_generic.t -> (float, Stdlib.Bigarray.float32_elt) Owl_dense_ndarray_generic.t
val fft2 : (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt) Owl_dense_ndarray_generic.t -> (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt) Owl_dense_ndarray_generic.t
val ifft2 : (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt) Owl_dense_ndarray_generic.t -> (Stdlib.Complex.t, Stdlib.Bigarray.complex32_elt) Owl_dense_ndarray_generic.t
\ No newline at end of file diff --git a/owl/Owl_fft_d/.dummy b/owl/Owl_fft_d/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_fft_generic/.dummy b/owl/Owl_fft_generic/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_fft_s/.dummy b/owl/Owl_fft_s/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_fftpack/.dummy b/owl/Owl_fftpack/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_lapacke/.dummy b/owl/Owl_lapacke/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_lapacke_generated/.dummy b/owl/Owl_lapacke_generated/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_linalg/.dummy b/owl/Owl_linalg/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_linalg/C/index.html b/owl/Owl_linalg/C/index.html deleted file mode 100644 index a5e67b4e8..000000000 --- a/owl/Owl_linalg/C/index.html +++ /dev/null @@ -1,25 +0,0 @@ - -C (owl.Owl_linalg.C)

Module Owl_linalg.C

include module type of struct include Owl_linalg_c end
type elt = Stdlib.Complex.t
type int32_mat = (int32, Stdlib.Bigarray.int32_elt) Owl_dense_matrix_generic.t
include Owl_linalg_intf.Common with type elt := elt and type mat := mat and type complex_mat = mat and type int32_mat := int32_mat
include Owl_base_linalg_intf.Common with type elt := elt with type mat := mat with type complex_mat = mat with type int32_mat := int32_mat
type complex_mat = mat
Basic functions
val inv : mat -> mat
val det : mat -> elt
val logdet : mat -> elt
val is_triu : mat -> bool
val is_tril : mat -> bool
val is_symmetric : mat -> bool
val is_diag : mat -> bool
Factorisation
val svd : ?thin:bool -> mat -> mat * mat * mat
val chol : ?upper:bool -> mat -> mat
val qr : ?thin:bool -> ?pivot:bool -> mat -> mat * mat * int32_mat
val lq : ?thin:bool -> mat -> mat * mat
Linear system of equations
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> mat -> mat -> mat
val sylvester : mat -> mat -> mat -> mat
val lyapunov : mat -> mat -> mat
val discrete_lyapunov : ?solver:[ `default | `direct | `bilinear ] -> mat -> mat -> mat
Basic functions
val pinv : ?tol:float -> mat -> mat
val rank : ?tol:float -> mat -> int
val norm : ?p:float -> mat -> float
val vecnorm : ?p:float -> mat -> float
val cond : ?p:float -> mat -> float
val rcond : mat -> float
val is_posdef : mat -> bool
Factorisation
val lu : mat -> mat * mat * int32_mat
val svdvals : mat -> mat
val gsvd : mat -> mat -> mat * mat * mat * mat * mat * mat
val gsvdvals : mat -> mat -> mat
val schur : mat -> mat * mat * complex_mat
val schur_tz : mat -> mat * mat
val ordschur : select:int32_mat -> mat -> mat -> mat * mat * complex_mat
val qz : mat -> mat -> mat * mat * mat * mat * complex_mat
val ordqz : select:int32_mat -> mat -> mat -> mat -> mat -> mat * mat * mat * mat * complex_mat
val qzvals : mat -> mat -> complex_mat
val hess : mat -> mat * mat
Eigenvalues & eigenvectors
val eig : ?permute:bool -> ?scale:bool -> mat -> complex_mat * complex_mat
val eigvals : ?permute:bool -> ?scale:bool -> mat -> complex_mat
Linear system of equations
val null : mat -> mat
val triangular_solve : upper:bool -> ?trans:bool -> mat -> mat -> mat
val linreg : mat -> mat -> elt * elt
Low-level factorisation functions
val lufact : mat -> mat * int32_mat
val qrfact : ?pivot:bool -> mat -> mat * mat * int32_mat
val bkfact : ?upper:bool -> ?symmetric:bool -> ?rook:bool -> mat -> mat * int32_mat
Matrix functions
val mpow : mat -> float -> mat
val expm : mat -> mat
val sinm : mat -> mat
val cosm : mat -> mat
val tanm : mat -> mat
val sincosm : mat -> mat * mat
val sinhm : mat -> mat
val coshm : mat -> mat
val tanhm : mat -> mat
val sinhcoshm : mat -> mat * mat
Helper functions
val select_ev : [ `LHP | `RHP | `UDI | `UDO ] -> mat -> int32_mat
val peakflops : ?n:int -> unit -> float
\ No newline at end of file diff --git a/owl/Owl_linalg/D/index.html b/owl/Owl_linalg/D/index.html deleted file mode 100644 index fca25e436..000000000 --- a/owl/Owl_linalg/D/index.html +++ /dev/null @@ -1,25 +0,0 @@ - -D (owl.Owl_linalg.D)

Module Owl_linalg.D

include module type of struct include Owl_linalg_d end
type elt = float
type complex_mat = Owl_dense_matrix_z.mat
type int32_mat = (int32, Stdlib.Bigarray.int32_elt) Owl_dense_matrix_generic.t
include Owl_linalg_intf.Common with type elt := elt and type mat := mat and type complex_mat := complex_mat and type int32_mat := int32_mat
include Owl_base_linalg_intf.Common with type elt := elt with type mat := mat with type complex_mat := complex_mat with type int32_mat := int32_mat
Basic functions
val inv : mat -> mat
val det : mat -> elt
val logdet : mat -> elt
val is_triu : mat -> bool
val is_tril : mat -> bool
val is_symmetric : mat -> bool
val is_diag : mat -> bool
Factorisation
val svd : ?thin:bool -> mat -> mat * mat * mat
val chol : ?upper:bool -> mat -> mat
val qr : ?thin:bool -> ?pivot:bool -> mat -> mat * mat * int32_mat
val lq : ?thin:bool -> mat -> mat * mat
Linear system of equations
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> mat -> mat -> mat
val sylvester : mat -> mat -> mat -> mat
val lyapunov : mat -> mat -> mat
val discrete_lyapunov : ?solver:[ `default | `direct | `bilinear ] -> mat -> mat -> mat
Basic functions
val pinv : ?tol:float -> mat -> mat
val rank : ?tol:float -> mat -> int
val norm : ?p:float -> mat -> float
val vecnorm : ?p:float -> mat -> float
val cond : ?p:float -> mat -> float
val rcond : mat -> float
val is_posdef : mat -> bool
Factorisation
val lu : mat -> mat * mat * int32_mat
val svdvals : mat -> mat
val gsvd : mat -> mat -> mat * mat * mat * mat * mat * mat
val gsvdvals : mat -> mat -> mat
val schur : mat -> mat * mat * complex_mat
val schur_tz : mat -> mat * mat
val ordschur : select:int32_mat -> mat -> mat -> mat * mat * complex_mat
val qz : mat -> mat -> mat * mat * mat * mat * complex_mat
val ordqz : select:int32_mat -> mat -> mat -> mat -> mat -> mat * mat * mat * mat * complex_mat
val qzvals : mat -> mat -> complex_mat
val hess : mat -> mat * mat
Eigenvalues & eigenvectors
val eig : ?permute:bool -> ?scale:bool -> mat -> complex_mat * complex_mat
val eigvals : ?permute:bool -> ?scale:bool -> mat -> complex_mat
Linear system of equations
val null : mat -> mat
val triangular_solve : upper:bool -> ?trans:bool -> mat -> mat -> mat
val linreg : mat -> mat -> elt * elt
Low-level factorisation functions
val lufact : mat -> mat * int32_mat
val qrfact : ?pivot:bool -> mat -> mat * mat * int32_mat
val bkfact : ?upper:bool -> ?symmetric:bool -> ?rook:bool -> mat -> mat * int32_mat
Matrix functions
val mpow : mat -> float -> mat
val expm : mat -> mat
val sinm : mat -> mat
val cosm : mat -> mat
val tanm : mat -> mat
val sincosm : mat -> mat * mat
val sinhm : mat -> mat
val coshm : mat -> mat
val tanhm : mat -> mat
val sinhcoshm : mat -> mat * mat
Helper functions
val select_ev : [ `LHP | `RHP | `UDI | `UDO ] -> mat -> int32_mat
val peakflops : ?n:int -> unit -> float
include Owl_linalg_intf.Real with type mat := mat and type elt := elt
include Owl_base_linalg_intf.Real with type mat := mat with type elt := elt
val care : ?diag_r:bool -> mat -> mat -> mat -> mat -> mat
val dare : ?diag_r:bool -> mat -> mat -> mat -> mat -> mat
\ No newline at end of file diff --git a/owl/Owl_linalg/Generic/index.html b/owl/Owl_linalg/Generic/index.html deleted file mode 100644 index bf294f787..000000000 --- a/owl/Owl_linalg/Generic/index.html +++ /dev/null @@ -1,81 +0,0 @@ - -Generic (owl.Owl_linalg.Generic)

Module Owl_linalg.Generic

include module type of struct include Owl_linalg_generic end

The module includes a set of advanced linear algebra operations such as singular value decomposition.

Currently, the Linalg module supports dense matrices of four number types: float32, float64, complex32, and complex64. Support for sparse matrices will be provided in the future.

Type definition
type ('a, 'b) t = ('a, 'b) Owl_dense_matrix_generic.t

Matrix type, a special case of N-dimensional array.

Basic functions
val inv : ('a, 'b) t -> ('a, 'b) t

inv x calculates the inverse of an invertible square matrix x, i.e. a matrix y such that x *@ y = I wherein I is an identity matrix. (If x is singular, inv will return a useless result.)
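For example, a quick numerical check (Owl.Mat and the double-precision module Owl.Linalg.D are assumed here, as elsewhere in Owl):

.. code-block:: ocaml

let () =
  let open Owl in
  let x = Mat.uniform 4 4 in
  let y = Linalg.D.inv x in
  (* x *@ y should be close to the identity for a well-conditioned x *)
  Mat.print Mat.(x *@ y)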

val pinv : ?tol:float -> ('a, 'b) t -> ('a, 'b) t

pinv x computes the Moore-Penrose pseudoinverse of matrix x. tol specifies the tolerance; elements whose absolute values are smaller than tol will be set to zero.

val det : ('a, 'b) t -> 'a

det x computes the determinant of a square matrix x.

val logdet : ('a, 'b) t -> 'a

logdet x computes the log of the determinant of a square matrix x. It is equivalent to log (det x) but may provide more accuracy and efficiency.

val rank : ?tol:float -> ('a, 'b) t -> int

rank x calculates the rank of a rectangular matrix x of shape m x n. The function does so by counting the number of singular values of x which are beyond a pre-defined threshold tol. By default, tol = max(m,n) * eps where eps = 1e-10.

val norm : ?p:float -> ('a, 'b) t -> float

norm ~p x computes the matrix p-norm of the passed in matrix x.

Parameters: * p is the order of norm, the default value is 2. * x is the input matrix.

Returns: * If p = 1, then returns the maximum absolute column sum of the matrix. * If p = 2, then returns approximately max (svd x). * If p = infinity, then returns the maximum absolute row sum of the matrix. * If p = -1, then returns the minimum absolute column sum of the matrix. * If p = -2, then returns approximately min (svd x). * If p = -infinity, then returns the minimum absolute row sum of the matrix.
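A small sketch comparing a few p-norms of the same matrix (the double-precision module Owl.Linalg.D is assumed):

.. code-block:: ocaml

let () =
  let open Owl in
  let x = Mat.uniform 4 4 in
  Printf.printf "1-norm   = %g\n" (Linalg.D.norm ~p:1. x);
  Printf.printf "2-norm   = %g\n" (Linalg.D.norm ~p:2. x);
  Printf.printf "inf-norm = %g\n" (Linalg.D.norm ~p:infinity x)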

val vecnorm : ?p:float -> ('a, 'b) t -> float

vecnorm ~p x calculates the generalised vector p-norm, defined below. If x is a matrix, it will be flattened to a vector first. Unlike the function of the same name in :doc:`owl_dense_ndarray_generic`, this function assumes the input is either a 1d vector or a 2d matrix.

.. math:: ||v||_p = \Big( \sum_{k=0}^{N-1} |v_k|^p \Big)^{1/p}

Parameters: * p is the order of norm, the default value is 2. * x is the input vector or matrix.

Returns: * If p = infinity, then returns :math:`||v||_{\infty} = \max_i(|v(i)|)`. * If p = -infinity, then returns :math:`||v||_{-\infty} = \min_i(|v(i)|)`. * If p = 2 and x is a matrix, then returns Frobenius norm of x. * Otherwise returns generalised vector p-norm defined above.

val cond : ?p:float -> ('a, 'b) t -> float

cond ~p x computes the p-norm condition number of matrix x.

cond ~p:1. x returns the 1-norm condition number;

cond ~p:2. x or cond x returns the 2-norm condition number.

cond ~p:infinity x returns the infinity norm condition number.

The default value of p is 2.

val rcond : ('a, 'b) t -> float

rcond x returns an estimate for the reciprocal condition of x in 1-norm. If x is well conditioned, the returned result is near 1.0. If x is badly conditioned, the result is near 0.

Check matrix types
val is_square : ('a, 'b) t -> bool

is_square x returns true if x is a square matrix otherwise false.

val is_triu : ('a, 'b) t -> bool

is_triu x returns true if x is upper triangular otherwise false.

val is_tril : ('a, 'b) t -> bool

is_tril x returns true if x is lower triangular otherwise false.

val is_symmetric : ('a, 'b) t -> bool

is_symmetric x returns true if x is symmetric otherwise false.

val is_hermitian : (Stdlib.Complex.t, 'a) t -> bool

is_hermitian x returns true if x is hermitian otherwise false.

val is_diag : ('a, 'b) t -> bool

is_diag x returns true if x is diagonal otherwise false.

val is_posdef : ('a, 'b) t -> bool

is_posdef x checks whether x is a positive semi-definite matrix.

Factorisation
val lu : ('a, 'b) t -> ('a, 'b) t * ('a, 'b) t * (int32, Stdlib.Bigarray.int32_elt) t

lu x -> (l, u, ipiv) calculates the LU decomposition of x. Pivoting is used by default.

val lq : ?thin:bool -> ('a, 'b) t -> ('a, 'b) t * ('a, 'b) t

lq x -> (l, q) calculates the LQ decomposition of x. By default, the reduced LQ decomposition is performed. But you can get full Q by setting parameter thin = false.

val qr : ?thin:bool -> ?pivot:bool -> ('a, 'b) t -> ('a, 'b) t * ('a, 'b) t * (int32, Stdlib.Bigarray.int32_elt) t

qr x calculates QR decomposition for an m by n matrix x as x = Q R. Q is an m by n matrix (where Q^T Q = I) and R is an n by n upper-triangular matrix.

The function returns a 3-tuple: the first two are q and r, and the third is the permutation vector of columns. The default value of pivot is false; setting pivot = true lets qr perform a pivoted factorisation. Note that the returned indices are not adjusted to 0-based C layout.

By default, qr performs a reduced QR factorisation; the full factorisation can be enabled by setting the thin parameter to false.
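A reduced QR factorisation can be checked by multiplying the factors back together (Owl.Linalg.D assumed):

.. code-block:: ocaml

let () =
  let open Owl in
  let x = Mat.uniform 5 3 in
  let q, r, _ = Linalg.D.qr x in        (* reduced, non-pivoted QR *)
  Mat.print Mat.(q *@ r - x)            (* residual should be ~0 *)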

val chol : ?upper:bool -> ('a, 'b) t -> ('a, 'b) t

chol x -> u calculates the Cholesky factorisation of a positive definite matrix x such that x = u' *@ u. By default, the upper triangular matrix is returned. The lower triangular part can be obtained by setting the parameter upper = false.

val svd : ?thin:bool -> ('a, 'b) t -> ('a, 'b) t * ('a, 'b) t * ('a, 'b) t

svd x -> (u, s, vt) calculates the singular value decomposition of x, and returns a 3-tuple (u,s,vt). By default, a reduced svd is performed: e.g., for an m x n matrix x wherein m <= n, u is returned as an m by m orthogonal matrix, s as a 1 by m row vector of singular values, and vt as the transpose of an n by m orthogonal rectangular matrix.

The full svd can be performed by setting thin = false. Note that for complex inputs, the returned singular values are also of complex type, with zero imaginary part.
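The shapes of the reduced factors can be inspected as follows (Owl.Linalg.D assumed):

.. code-block:: ocaml

let () =
  let open Owl in
  let x = Mat.uniform 5 3 in
  let u, s, vt = Linalg.D.svd x in      (* reduced SVD by default *)
  Printf.printf "u: %dx%d  s: %dx%d  vt: %dx%d\n"
    (Mat.row_num u) (Mat.col_num u)
    (Mat.row_num s) (Mat.col_num s)
    (Mat.row_num vt) (Mat.col_num vt)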

val svdvals : ('a, 'b) t -> ('a, 'b) t

svdvals x -> s performs the singular value decomposition of x like svd x, but the function only returns the singular values without u and vt. Note that for complex numbers, the return is also complex type.

val gsvd : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t * ('a, 'b) t * ('a, 'b) t * ('a, 'b) t * ('a, 'b) t * ('a, 'b) t

gsvd x y -> (u, v, q, d1, d2, r) computes the generalised singular value decomposition of a pair of general rectangular matrices x and y. d1 and d2 contain the generalised singular value pairs of x and y. The shape of x is m x n and the shape of y is p x n.

.. code-block:: ocaml

let x = Mat.uniform 5 5;;
let y = Mat.uniform 2 5;;
let u, v, q, d1, d2, r = Linalg.gsvd x y;;
Mat.(u *@ d1 *@ r *@ transpose q =~ x);;
Mat.(v *@ d2 *@ r *@ transpose q =~ y);;

Please refer to: `Intel MKL Reference <https://software.intel.com/en-us/mkl-developer-reference-c-ggsvd3>`_

val gsvdvals : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

gsvdvals x y is similar to gsvd x y but only returns the singular values of the generalised singular value decomposition of x and y.

val schur : otyp:('c, 'd) Stdlib.Bigarray.kind -> ('a, 'b) t -> ('a, 'b) t * ('a, 'b) t * ('c, 'd) t

schur x -> (t, z, w) calculates Schur factorisation of x in the following form.

.. math:: X = Z T Z^H

Parameters: * otyp: the complex type of eigen values. * x: the n x n square matrix.

Returns: * t is the (quasi) triangular Schur factor. * z is the orthogonal/unitary Schur vectors. The eigenvalues are not sorted; they have the same order in which they appear on the diagonal of the Schur form t. * w contains the eigenvalues of x. otyp is used to specify the type of w; it needs to be consistent with the input type. E.g., if the input x is float32 then otyp must be complex32. However, if you use the S, D, C, Z modules, then you do not need to worry about otyp.

val schur_tz : ('a, 'b) t -> ('a, 'b) t * ('a, 'b) t

schur_tz x is similar to schur but only returns (t, z).

val ordschur : otyp:('c, 'd) Stdlib.Bigarray.kind -> select:(int32, Stdlib.Bigarray.int32_elt) t -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t * ('a, 'b) t * ('c, 'd) t

ordschur ~select t z -> (r, p) reorders t and z returned by the Schur factorisation schur x -> (t, z) according to select such that

.. math:: X = P R P^H

Parameters: * otyp: the complex type of eigenvalues. * select: the logical vector to select eigenvalues; refer to select_ev. * t: the Schur matrix returned by schur x. * z: the unitary matrix z returned by schur x.

Returns: * r: reordered Schur matrix t. * p: reordered orthogonal matrix z.

val qz : otyp:('c, 'd) Stdlib.Bigarray.kind -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t * ('a, 'b) t * ('a, 'b) t * ('a, 'b) t * ('c, 'd) t

qz x y -> (s, t, q, z, w) calculates the generalised Schur factorisation of the pair (x, y) in the following form. It is also known as QZ decomposition.

.. math:: X = Q S Z^H, \quad Y = Q T Z^H

Parameters: * otyp: the complex type of eigen values. * x: the n x n square matrix. * y: the n x n square matrix.

Returns: * s: the upper quasitriangular matrices S. * t: the upper quasitriangular matrices T. * q: the unitary matrices Q. * z: the unitary matrices Z. * w: the generalised eigenvalue for a pair of matrices (X,Y).

val ordqz : otyp:('c, 'd) Stdlib.Bigarray.kind -> select:(int32, Stdlib.Bigarray.int32_elt) t -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t * ('a, 'b) t * ('a, 'b) t * ('a, 'b) t * ('c, 'd) t

ordqz ~select a b q z reorders the generalised Schur decomposition of a pair of matrices (X,Y) so that a selected cluster of eigenvalues appears in the leading diagonal blocks of (X,Y).

val qzvals : otyp:('c, 'd) Stdlib.Bigarray.kind -> ('a, 'b) t -> ('a, 'b) t -> ('c, 'd) t

qzvals ~otyp x y is similar to qz ~otyp x y but only returns the generalised eigen values.

val hess : ('a, 'b) t -> ('a, 'b) t * ('a, 'b) t

hess x -> (h, q) calculates the Hessenberg form of a given matrix x. Both the Hessenberg matrix h and the unitary matrix q are returned, such that x = q *@ h *@ (transpose q).

.. math:: X = Q H Q^T

Eigenvalues & eigenvectors
val eig : ?permute:bool -> ?scale:bool -> otyp:('a, 'b) Stdlib.Bigarray.kind -> ('c, 'd) t -> ('a, 'b) t * ('a, 'b) t

eig x -> v, w computes the right eigenvectors v and eigenvalues w of an arbitrary square matrix x. The eigenvectors are column vectors in v, their corresponding eigenvalues have the same order in w as that in v.

Note that otyp specifies the complex type of the output, but you do not need to worry about this parameter if you use the S, D, C, Z modules in Linalg.
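For example, with the double-precision module the complex output type is fixed for you (Owl.Linalg.D and Owl.Dense.Matrix.Z assumed):

.. code-block:: ocaml

let () =
  let open Owl in
  let x = Mat.uniform 4 4 in
  let v, w = Linalg.D.eig x in
  ignore v;                    (* column eigenvectors, unused here *)
  Dense.Matrix.Z.print w       (* eigenvalues as a complex row vector *)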

val eigvals : ?permute:bool -> ?scale:bool -> otyp:('a, 'b) Stdlib.Bigarray.kind -> ('c, 'd) t -> ('a, 'b) t

eigvals x -> w is similar to eig but only computes the eigenvalues of an arbitrary square matrix x.

Linear system of equations
val null : ('a, 'b) t -> ('a, 'b) t

null a -> x computes an orthonormal basis x for the null space of a, obtained from the singular value decomposition. Namely, a *@ x has negligible elements, M.col_num x is the nullity of a, and transpose x *@ x = I. That is,

.. math:: X^T X = I

val triangular_solve : upper:bool -> ?trans:bool -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

triangular_linsolve a b -> x solves a linear system of equations a * x = b where a is either an upper or a lower triangular matrix. This function uses the CBLAS routine trsm under the hood.

.. math:: AX = B

By default, trans = false indicates no transpose. If trans = true, then the function will solve A^T * x = b for real matrices and A^H * x = b for complex matrices.

.. math:: A^H X = B

val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

linsolve a b -> x solves a linear system of equations a * x = b in the following form. By default, typ=`n and the function uses LU factorisation with partial pivoting when a is square and QR factorisation with column pivoting otherwise. The number of rows of a must equal the number of rows of b. If a is an upper (lower) triangular matrix, the function calls the solve_triangular function when typ=`u (typ=`l).

.. math:: AX = B

By default, trans = false indicates no transpose. If trans = true, then the function will solve A^T * x = b for real matrices and A^H * x = b for complex matrices.

.. math:: A^H X = B

The associated operator is /@, so you can simply use a /@ b to solve the linear equation system to get x. Please refer to :doc:`owl_operator`.
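A minimal sketch of solving a square system and checking the residual (Owl.Linalg.D assumed):

.. code-block:: ocaml

let () =
  let open Owl in
  let a = Mat.uniform 3 3 in
  let b = Mat.uniform 3 1 in
  let x = Linalg.D.linsolve a b in
  Mat.print Mat.(a *@ x - b)    (* residual should be ~0 *)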

val linreg : ('a, 'b) t -> ('a, 'b) t -> 'a * 'a

linreg x y -> (a, b) solves y = a + b*x using Ordinary Least Squares.

.. math:: Y = A + BX

val sylvester : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

sylvester a b c solves a Sylvester equation in the following form. The function calls the LAPACKE function trsyl to solve the system.

.. math:: AX + XB = C

Parameters: * a : m x m matrix A. * b : n x n matrix B. * c : m x n matrix C.

Returns: * x : m x n matrix X.

val lyapunov : ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

lyapunov a q solves a continuous Lyapunov equation in the following form. The function calls the LAPACKE function trsyl to solve the system. In Matlab, the same function is called lyap.

.. math:: AX + XA^H = Q

Parameters: * a : m x m matrix A. * q : n x n matrix Q.

Returns: * x : m x n matrix X.
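A residual check for the continuous Lyapunov solver (Owl.Linalg.D assumed; for a real matrix a, transpose plays the role of the conjugate transpose):

.. code-block:: ocaml

let () =
  let open Owl in
  let a = Mat.uniform 3 3 in
  let q = Mat.uniform 3 3 in
  let q = Mat.(q + transpose q) in              (* symmetrise Q *)
  let x = Linalg.D.lyapunov a q in
  Mat.print Mat.(a *@ x + x *@ transpose a - q) (* residual should be ~0 *)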

val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> ('a, 'b) t -> ('a, 'b) t -> ('a, 'b) t

discrete_lyapunov a q solves a discrete-time Lyapunov equation in the following form.

.. math:: X - AXA^H = Q

Parameters: * a : m x m matrix A. * q : n x n matrix Q.

Returns: * x : m x n matrix X.

val care : ?diag_r:bool -> (float, 'a) t -> (float, 'a) t -> (float, 'a) t -> (float, 'a) t -> (float, 'a) t

care ?diag_r a b q r solves the continuous-time algebraic Riccati equation system in the following form. The algorithm is based on :cite:`laub1979schur`.

.. math:: A^T X + X A - X B R^{-1} B^T X + Q = 0

Parameters: * a : real coefficient matrix A. * b : real coefficient matrix B. * q : real coefficient matrix Q. * r : real coefficient matrix R. R must be non-singular. * diag_r : true if R is a diagonal matrix, false by default.

Returns:
  • x : a solution matrix X.
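A 1 x 1 sanity-check sketch (assuming Owl.Linalg.D and Owl.Mat): with a = b = q = r = 1 the equation reduces to 2x - x^2 + 1 = 0, whose stabilising root is 1 + sqrt 2:

  let () =
    let a = Owl.Mat.of_array [| 1. |] 1 1 in
    let b = Owl.Mat.of_array [| 1. |] 1 1 in
    let q = Owl.Mat.of_array [| 1. |] 1 1 in
    let r = Owl.Mat.of_array [| 1. |] 1 1 in
    (* expected value: 1 + sqrt 2, roughly 2.4142 *)
    let x = Owl.Linalg.D.care a b q r in
    Owl.Mat.print x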

val dare : - ?diag_r:bool -> - (float, 'a) t -> - (float, 'a) t -> - (float, 'a) t -> - (float, 'a) t -> - (float, 'a) t

dare ?diag_r a b q r solves the discrete-time algebraic Riccati equation system in the following form. The algorithm is based on :cite:`laub1979schur`.

.. math:: A^T X A - X - (A^T X B) (B^T X B + R)^{-1} (B^T X A) + Q = 0

Parameters:
  • a : real coefficient matrix A. A must be non-singular.
  • b : real coefficient matrix B.
  • q : real coefficient matrix Q.
  • r : real coefficient matrix R. R must be non-singular.
  • diag_r : true if R is a diagonal matrix, false by default.

Returns:
  • x : a symmetric solution matrix X.

Low-level factorisation functions
val lufact : ('a, 'b) t -> ('a, 'b) t * (int32, Stdlib.Bigarray.int32_elt) t

lufact x -> (a, ipiv) calculates the LU factorisation with partial pivoting of a general matrix x.

val qrfact : - ?pivot:bool -> - ('a, 'b) t -> - ('a, 'b) t * ('a, 'b) t * (int32, Stdlib.Bigarray.int32_elt) t

qrfact x -> (a, tau, jpvt) calculates QR factorisation of a general matrix x.

val bkfact : - ?upper:bool -> - ?symmetric:bool -> - ?rook:bool -> - ('a, 'b) t -> - ('a, 'b) t * (int32, Stdlib.Bigarray.int32_elt) t

bkfact x -> (a, ipiv) calculates the Bunch-Kaufman factorisation of x. If symmetric = true then x is symmetric; if symmetric = false then x is hermitian. If rook = true the function uses the bounded Bunch-Kaufman ("rook") diagonal pivoting method; if rook = false the Bunch-Kaufman diagonal pivoting method is used. a contains details of the block-diagonal matrix d and the multipliers used to obtain the factor u (or l).

The upper parameter indicates whether the upper or lower triangular part of x is stored and how x is factored. If upper = true then the upper triangular part is stored: x = u*d*u'; otherwise x = l*d*l'.

ipiv indicates the details of the interchanges and the block structure of d. Please refer to the functions sytrf and hetrf in the MKL documentation for more details.
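A minimal call sketch (assuming Owl.Linalg.D and Owl.Mat) on a small symmetric indefinite matrix; only the packed factor a is printed here:

  let () =
    let x = Owl.Mat.of_array [| 4.; 1.; 1.; -2. |] 2 2 in
    let a, _ipiv = Owl.Linalg.D.bkfact ~symmetric:true x in
    (* a packs the block-diagonal d and the multipliers of the u (or l) factor *)
    Owl.Mat.print a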

Matrix functions
val mpow : ('a, 'b) t -> float -> ('a, 'b) t

mpow x r returns the dot product of square matrix x with itself r times, and more generally raises the matrix to the rth power. r is a float that must be equal to an integer; it can be negative, zero, or positive. Non-integer exponents are not yet implemented. (If r is negative, mpow calls inv, and the warnings in the documentation for inv apply.)
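For instance (assuming Owl.Linalg.D and Owl.Mat), cubing a small unitriangular matrix:

  let () =
    let x = Owl.Mat.of_array [| 1.; 1.; 0.; 1. |] 2 2 in
    (* the exponent is a float equal to an integer; expected result [[1; 3]; [0; 1]] *)
    let y = Owl.Linalg.D.mpow x 3. in
    Owl.Mat.print y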

val expm : ('a, 'b) t -> ('a, 'b) t

expm x computes the matrix exponential of x defined by

.. math:: e^x = \sum_{k=0}^{\infty} \frac{1}{k!} x^k

The function implements the scaling and squaring algorithm which uses Padé approximation to compute the matrix exponential :cite:`al2009new`.
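A quick sketch under the same assumptions (Owl.Linalg.D, Owl.Mat); for a diagonal matrix the matrix exponential is simply the element-wise exponential of the diagonal:

  let () =
    let x = Owl.Mat.of_array [| 1.; 0.; 0.; 2. |] 2 2 in
    let y = Owl.Linalg.D.expm x in
    (* diagonal entries should be close to e and e^2 *)
    Owl.Mat.print y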

val sinm : ('a, 'b) t -> ('a, 'b) t

sinm x computes the matrix sine of input x. The function uses expm to compute the matrix exponentials.

val cosm : ('a, 'b) t -> ('a, 'b) t

cosm x computes the matrix cosine of input x. The function uses expm to compute the matrix exponentials.

val tanm : ('a, 'b) t -> ('a, 'b) t

tanm x computes the matrix tangent of input x. The function uses expm to compute the matrix exponentials.

val sincosm : ('a, 'b) t -> ('a, 'b) t * ('a, 'b) t

sincosm x returns both matrix sine and cosine of x.

val sinhm : ('a, 'b) t -> ('a, 'b) t

sinhm x computes the hyperbolic matrix sine of input x. The function uses expm to compute the matrix exponentials.

val coshm : ('a, 'b) t -> ('a, 'b) t

coshm x computes the hyperbolic matrix cosine of input x. The function uses expm to compute the matrix exponentials.

val tanhm : ('a, 'b) t -> ('a, 'b) t

tanhm x computes the hyperbolic matrix tangent of input x. The function uses expm to compute the matrix exponentials.

val sinhcoshm : ('a, 'b) t -> ('a, 'b) t * ('a, 'b) t

sinhcoshm x returns both hyperbolic matrix sine and cosine of x.

Helper functions
val select_ev : - [ `LHP | `RHP | `UDI | `UDO ] -> - ('a, 'b) t -> - (int32, Stdlib.Bigarray.int32_elt) t

select_ev keyword ev generates a logical vector (of the same shape as ev) from the eigenvalues ev according to the passed-in keyword (a usage sketch follows the list below).

  • LHP: Left-half plane :math:`(real(e) < 0)`.
  • RHP: Right-half plane :math:`(real(e) \ge 0)`.
  • UDI: Interior of the unit disk :math:`(abs(e) < 1)`.
  • UDO: Exterior of the unit disk :math:`(abs(e) \ge 1)`.
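The sketch below uses the complex instance Owl.Linalg.Z (an assumed module path), so the eigenvalue matrix and the select_ev input share one type; it marks the eigenvalues lying in the open left-half plane:

  let () =
    let a = Owl.Dense.Matrix.Z.uniform 4 4 in
    let ev = Owl.Linalg.Z.eigvals a in
    (* sel is an int32 logical vector of the same shape as ev *)
    let sel = Owl.Linalg.Z.select_ev `LHP ev in
    Owl.Dense.Matrix.Generic.print sel
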
val peakflops : ?n:int -> unit -> float

peakflops () returns the peak number of floating-point operations measured using the Owl_cblas_basic.dgemm function. The default matrix size is 2000 x 2000, but you can change this by setting n to another value.
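For example (assuming the double-precision instance Owl.Linalg.D), a rough measurement with a larger matrix:

  let () =
    let flops = Owl.Linalg.D.peakflops ~n:3000 () in
    (* report the measured rate in GFLOPS *)
    Printf.printf "peak ~= %.2f GFLOPS\n" (flops /. 1e9)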

\ No newline at end of file diff --git a/owl/Owl_linalg/S/index.html b/owl/Owl_linalg/S/index.html deleted file mode 100644 index c720b4ef9..000000000 --- a/owl/Owl_linalg/S/index.html +++ /dev/null @@ -1,25 +0,0 @@ - -S (owl.Owl_linalg.S)

Module Owl_linalg.S

include module type of struct include Owl_linalg_s end
type elt = float
type complex_mat = Owl_dense_matrix_c.mat
type int32_mat = (int32, Stdlib.Bigarray.int32_elt) Owl_dense_matrix_generic.t
include Owl_linalg_intf.Common - with type elt := elt - and type mat := mat - and type complex_mat := complex_mat - and type int32_mat := int32_mat
include Owl_base_linalg_intf.Common - with type elt := elt - with type mat := mat - with type complex_mat := complex_mat - with type int32_mat := int32_mat
Basic functions
val inv : mat -> mat
val det : mat -> elt
val logdet : mat -> elt
val is_triu : mat -> bool
val is_tril : mat -> bool
val is_symmetric : mat -> bool
val is_diag : mat -> bool
Factorisation
val svd : ?thin:bool -> mat -> mat * mat * mat
val chol : ?upper:bool -> mat -> mat
val qr : ?thin:bool -> ?pivot:bool -> mat -> mat * mat * int32_mat
val lq : ?thin:bool -> mat -> mat * mat
Linear system of equations
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> mat -> mat -> mat
val sylvester : mat -> mat -> mat -> mat
val lyapunov : mat -> mat -> mat
val discrete_lyapunov : - ?solver:[ `default | `direct | `bilinear ] -> - mat -> - mat -> - mat
Basic functions
val pinv : ?tol:float -> mat -> mat
val rank : ?tol:float -> mat -> int
val norm : ?p:float -> mat -> float
val vecnorm : ?p:float -> mat -> float
val cond : ?p:float -> mat -> float
val rcond : mat -> float
val is_posdef : mat -> bool
Factorisation
val lu : mat -> mat * mat * int32_mat
val svdvals : mat -> mat
val gsvd : mat -> mat -> mat * mat * mat * mat * mat * mat
val gsvdvals : mat -> mat -> mat
val schur : mat -> mat * mat * complex_mat
val schur_tz : mat -> mat * mat
val ordschur : select:int32_mat -> mat -> mat -> mat * mat * complex_mat
val qz : mat -> mat -> mat * mat * mat * mat * complex_mat
val ordqz : - select:int32_mat -> - mat -> - mat -> - mat -> - mat -> - mat * mat * mat * mat * complex_mat
val qzvals : mat -> mat -> complex_mat
val hess : mat -> mat * mat
Eigenvalues & eigenvectors
val eig : ?permute:bool -> ?scale:bool -> mat -> complex_mat * complex_mat
val eigvals : ?permute:bool -> ?scale:bool -> mat -> complex_mat
Linear system of equations
val null : mat -> mat
val triangular_solve : upper:bool -> ?trans:bool -> mat -> mat -> mat
val linreg : mat -> mat -> elt * elt
Low-level factorisation functions
val lufact : mat -> mat * int32_mat
val qrfact : ?pivot:bool -> mat -> mat * mat * int32_mat
val bkfact : - ?upper:bool -> - ?symmetric:bool -> - ?rook:bool -> - mat -> - mat * int32_mat
Matrix functions
val mpow : mat -> float -> mat
val expm : mat -> mat
val sinm : mat -> mat
val cosm : mat -> mat
val tanm : mat -> mat
val sincosm : mat -> mat * mat
val sinhm : mat -> mat
val coshm : mat -> mat
val tanhm : mat -> mat
val sinhcoshm : mat -> mat * mat
Helper functions
val select_ev : [ `LHP | `RHP | `UDI | `UDO ] -> mat -> int32_mat
val peakflops : ?n:int -> unit -> float
include Owl_linalg_intf.Real with type mat := mat and type elt := elt
include Owl_base_linalg_intf.Real with type mat := mat with type elt := elt
val care : ?diag_r:bool -> mat -> mat -> mat -> mat -> mat
val dare : ?diag_r:bool -> mat -> mat -> mat -> mat -> mat
\ No newline at end of file diff --git a/owl/Owl_linalg/Z/index.html b/owl/Owl_linalg/Z/index.html deleted file mode 100644 index d03b9b321..000000000 --- a/owl/Owl_linalg/Z/index.html +++ /dev/null @@ -1,25 +0,0 @@ - -Z (owl.Owl_linalg.Z)

Module Owl_linalg.Z

include module type of struct include Owl_linalg_z end
type elt = Stdlib.Complex.t
type int32_mat = (int32, Stdlib.Bigarray.int32_elt) Owl_dense_matrix_generic.t
include Owl_linalg_intf.Common - with type elt := elt - and type mat := mat - and type complex_mat = mat - and type int32_mat := int32_mat
include Owl_base_linalg_intf.Common - with type elt := elt - with type mat := mat - with type complex_mat = mat - with type int32_mat := int32_mat
type complex_mat = mat
Basic functions
val inv : mat -> mat
val det : mat -> elt
val logdet : mat -> elt
val is_triu : mat -> bool
val is_tril : mat -> bool
val is_symmetric : mat -> bool
val is_diag : mat -> bool
Factorisation
val svd : ?thin:bool -> mat -> mat * mat * mat
val chol : ?upper:bool -> mat -> mat
val qr : ?thin:bool -> ?pivot:bool -> mat -> mat * mat * int32_mat
val lq : ?thin:bool -> mat -> mat * mat
Linear system of equations
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> mat -> mat -> mat
val sylvester : mat -> mat -> mat -> mat
val lyapunov : mat -> mat -> mat
val discrete_lyapunov : - ?solver:[ `default | `direct | `bilinear ] -> - mat -> - mat -> - mat
Basic functions
val pinv : ?tol:float -> mat -> mat
val rank : ?tol:float -> mat -> int
val norm : ?p:float -> mat -> float
val vecnorm : ?p:float -> mat -> float
val cond : ?p:float -> mat -> float
val rcond : mat -> float
val is_posdef : mat -> bool
Factorisation
val lu : mat -> mat * mat * int32_mat
val svdvals : mat -> mat
val gsvd : mat -> mat -> mat * mat * mat * mat * mat * mat
val gsvdvals : mat -> mat -> mat
val schur : mat -> mat * mat * complex_mat
val schur_tz : mat -> mat * mat
val ordschur : select:int32_mat -> mat -> mat -> mat * mat * complex_mat
val qz : mat -> mat -> mat * mat * mat * mat * complex_mat
val ordqz : - select:int32_mat -> - mat -> - mat -> - mat -> - mat -> - mat * mat * mat * mat * complex_mat
val qzvals : mat -> mat -> complex_mat
val hess : mat -> mat * mat
Eigenvalues & eigenvectors
val eig : ?permute:bool -> ?scale:bool -> mat -> complex_mat * complex_mat
val eigvals : ?permute:bool -> ?scale:bool -> mat -> complex_mat
Linear system of equations
val null : mat -> mat
val triangular_solve : upper:bool -> ?trans:bool -> mat -> mat -> mat
val linreg : mat -> mat -> elt * elt
Low-level factorisation functions
val lufact : mat -> mat * int32_mat
val qrfact : ?pivot:bool -> mat -> mat * mat * int32_mat
val bkfact : - ?upper:bool -> - ?symmetric:bool -> - ?rook:bool -> - mat -> - mat * int32_mat
Matrix functions
val mpow : mat -> float -> mat
val expm : mat -> mat
val sinm : mat -> mat
val cosm : mat -> mat
val tanm : mat -> mat
val sincosm : mat -> mat * mat
val sinhm : mat -> mat
val coshm : mat -> mat
val tanhm : mat -> mat
val sinhcoshm : mat -> mat * mat
Helper functions
val select_ev : [ `LHP | `RHP | `UDI | `UDO ] -> mat -> int32_mat
val peakflops : ?n:int -> unit -> float
\ No newline at end of file diff --git a/owl/Owl_linalg_c/.dummy b/owl/Owl_linalg_c/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_linalg_d/.dummy b/owl/Owl_linalg_d/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_linalg_generic/.dummy b/owl/Owl_linalg_generic/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_linalg_intf/.dummy b/owl/Owl_linalg_intf/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_linalg_intf/module-type-Common/index.html b/owl/Owl_linalg_intf/module-type-Common/index.html deleted file mode 100644 index 2e2eaefad..000000000 --- a/owl/Owl_linalg_intf/module-type-Common/index.html +++ /dev/null @@ -1,17 +0,0 @@ - -Common (owl.Owl_linalg_intf.Common)

Module type Owl_linalg_intf.Common

include Owl_base_linalg_intf.Common
type elt
type mat
type complex_mat
type int32_mat
Basic functions
val inv : mat -> mat
val det : mat -> elt
val logdet : mat -> elt
val is_triu : mat -> bool
val is_tril : mat -> bool
val is_symmetric : mat -> bool
val is_diag : mat -> bool
Factorisation
val svd : ?thin:bool -> mat -> mat * mat * mat
val chol : ?upper:bool -> mat -> mat
val qr : ?thin:bool -> ?pivot:bool -> mat -> mat * mat * int32_mat
val lq : ?thin:bool -> mat -> mat * mat
Linear system of equations
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> mat -> mat -> mat
val sylvester : mat -> mat -> mat -> mat
val lyapunov : mat -> mat -> mat
val discrete_lyapunov : - ?solver:[ `default | `direct | `bilinear ] -> - mat -> - mat -> - mat
Basic functions
val pinv : ?tol:float -> mat -> mat
val rank : ?tol:float -> mat -> int
val norm : ?p:float -> mat -> float
val vecnorm : ?p:float -> mat -> float
val cond : ?p:float -> mat -> float
val rcond : mat -> float
val is_posdef : mat -> bool
Factorisation
val lu : mat -> mat * mat * int32_mat
val svdvals : mat -> mat
val gsvd : mat -> mat -> mat * mat * mat * mat * mat * mat
val gsvdvals : mat -> mat -> mat
val schur : mat -> mat * mat * complex_mat
val schur_tz : mat -> mat * mat
val ordschur : select:int32_mat -> mat -> mat -> mat * mat * complex_mat
val qz : mat -> mat -> mat * mat * mat * mat * complex_mat
val ordqz : - select:int32_mat -> - mat -> - mat -> - mat -> - mat -> - mat * mat * mat * mat * complex_mat
val qzvals : mat -> mat -> complex_mat
val hess : mat -> mat * mat
Eigenvalues & eigenvectors
val eig : ?permute:bool -> ?scale:bool -> mat -> complex_mat * complex_mat
val eigvals : ?permute:bool -> ?scale:bool -> mat -> complex_mat
Linear system of equations
val null : mat -> mat
val triangular_solve : upper:bool -> ?trans:bool -> mat -> mat -> mat
val linreg : mat -> mat -> elt * elt
Low-level factorisation functions
val lufact : mat -> mat * int32_mat
val qrfact : ?pivot:bool -> mat -> mat * mat * int32_mat
val bkfact : - ?upper:bool -> - ?symmetric:bool -> - ?rook:bool -> - mat -> - mat * int32_mat
Matrix functions
val mpow : mat -> float -> mat
val expm : mat -> mat
val sinm : mat -> mat
val cosm : mat -> mat
val tanm : mat -> mat
val sincosm : mat -> mat * mat
val sinhm : mat -> mat
val coshm : mat -> mat
val tanhm : mat -> mat
val sinhcoshm : mat -> mat * mat
Helper functions
val select_ev : [ `LHP | `RHP | `UDI | `UDO ] -> mat -> int32_mat
val peakflops : ?n:int -> unit -> float
\ No newline at end of file diff --git a/owl/Owl_linalg_intf/module-type-Real/index.html b/owl/Owl_linalg_intf/module-type-Real/index.html deleted file mode 100644 index c1dec688e..000000000 --- a/owl/Owl_linalg_intf/module-type-Real/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Real (owl.Owl_linalg_intf.Real)

Module type Owl_linalg_intf.Real

include Owl_base_linalg_intf.Real
type elt
type mat
val care : ?diag_r:bool -> mat -> mat -> mat -> mat -> mat
val dare : ?diag_r:bool -> mat -> mat -> mat -> mat -> mat
\ No newline at end of file diff --git a/owl/Owl_linalg_s/.dummy b/owl/Owl_linalg_s/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_linalg_z/.dummy b/owl/Owl_linalg_z/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_maths/.dummy b/owl/Owl_maths/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_maths_special/.dummy b/owl/Owl_maths_special/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_matrix/.dummy b/owl/Owl_matrix/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_matrix_check/.dummy b/owl/Owl_matrix_check/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_matrix_swap/.dummy b/owl/Owl_matrix_swap/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_ndarray/.dummy b/owl/Owl_ndarray/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_ndarray_contract/.dummy b/owl/Owl_ndarray_contract/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_ndarray_conv/.dummy b/owl/Owl_ndarray_conv/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_ndarray_fma/.dummy b/owl/Owl_ndarray_fma/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_ndarray_maths/.dummy b/owl/Owl_ndarray_maths/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_ndarray_pool/.dummy b/owl/Owl_ndarray_pool/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_ndarray_repeat/.dummy b/owl/Owl_ndarray_repeat/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_ndarray_slide/.dummy b/owl/Owl_ndarray_slide/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_ndarray_sort/.dummy b/owl/Owl_ndarray_sort/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_ndarray_transpose/.dummy b/owl/Owl_ndarray_transpose/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_ndarray_upsampling/.dummy b/owl/Owl_ndarray_upsampling/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_ndarray_utils/.dummy b/owl/Owl_ndarray_utils/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_neural/.dummy b/owl/Owl_neural/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_neural/D/Graph/Neuron/Activation/index.html b/owl/Owl_neural/D/Graph/Neuron/Activation/index.html deleted file mode 100644 index 3ba9f17e5..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Activation/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Activation (owl.Owl_neural.D.Graph.Neuron.Activation)

Module Neuron.Activation

type typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Activation.typ = -
  1. | Elu
  2. | Relu
  3. | Sigmoid
  4. | HardSigmoid
  5. | Softmax of int
  6. | Softplus
  7. | Softsign
  8. | Tanh
  9. | Relu6
  10. | LeakyRelu of float
  11. | TRelu of float
  12. | Custom of Optimise.Algodiff.t -> Optimise.Algodiff.t
  13. | None
type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Activation.neuron_typ = - {
  1. mutable activation : typ;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val run_activation : Optimise.Algodiff.t -> typ -> Optimise.Algodiff.t
val copy : neuron_typ -> neuron_typ
val activation_to_string : typ -> string
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Add/index.html b/owl/Owl_neural/D/Graph/Neuron/Add/index.html deleted file mode 100644 index a1ca092dc..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Add/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Add (owl.Owl_neural.D.Graph.Neuron.Add)

Module Neuron.Add

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Add.neuron_typ = - {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/AlphaDropout/index.html b/owl/Owl_neural/D/Graph/Neuron/AlphaDropout/index.html deleted file mode 100644 index 71441cf42..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/AlphaDropout/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -AlphaDropout (owl.Owl_neural.D.Graph.Neuron.AlphaDropout)

Module Neuron.AlphaDropout

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.AlphaDropout.neuron_typ = - {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : float -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Average/index.html b/owl/Owl_neural/D/Graph/Neuron/Average/index.html deleted file mode 100644 index ba213392d..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Average/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Average (owl.Owl_neural.D.Graph.Neuron.Average)

Module Neuron.Average

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Average.neuron_typ = - {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/AvgPool1D/index.html b/owl/Owl_neural/D/Graph/Neuron/AvgPool1D/index.html deleted file mode 100644 index fbe3e80fe..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/AvgPool1D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -AvgPool1D (owl.Owl_neural.D.Graph.Neuron.AvgPool1D)

Module Neuron.AvgPool1D

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.AvgPool1D.neuron_typ = - {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : Owl_types.padding -> int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/AvgPool2D/index.html b/owl/Owl_neural/D/Graph/Neuron/AvgPool2D/index.html deleted file mode 100644 index a7e39f9b7..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/AvgPool2D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -AvgPool2D (owl.Owl_neural.D.Graph.Neuron.AvgPool2D)

Module Neuron.AvgPool2D

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.AvgPool2D.neuron_typ = - {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : Owl_types.padding -> int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Concatenate/index.html b/owl/Owl_neural/D/Graph/Neuron/Concatenate/index.html deleted file mode 100644 index 5599800b7..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Concatenate/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Concatenate (owl.Owl_neural.D.Graph.Neuron.Concatenate)

Module Neuron.Concatenate

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Concatenate.neuron_typ = - {
  1. mutable axis : int;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : int -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Conv1D/index.html b/owl/Owl_neural/D/Graph/Neuron/Conv1D/index.html deleted file mode 100644 index a79069947..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Conv1D/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -Conv1D (owl.Owl_neural.D.Graph.Neuron.Conv1D)

Module Neuron.Conv1D

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Conv1D.neuron_typ = - {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : - ?inputs:int array -> - Owl_types.padding -> - int array -> - int array -> - Init.typ -> - neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Conv2D/index.html b/owl/Owl_neural/D/Graph/Neuron/Conv2D/index.html deleted file mode 100644 index 25cbb854a..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Conv2D/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -Conv2D (owl.Owl_neural.D.Graph.Neuron.Conv2D)

Module Neuron.Conv2D

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Conv2D.neuron_typ = - {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : - ?inputs:int array -> - Owl_types.padding -> - int array -> - int array -> - Init.typ -> - neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Conv3D/index.html b/owl/Owl_neural/D/Graph/Neuron/Conv3D/index.html deleted file mode 100644 index 6a2631f7a..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Conv3D/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -Conv3D (owl.Owl_neural.D.Graph.Neuron.Conv3D)

Module Neuron.Conv3D

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Conv3D.neuron_typ = - {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : - ?inputs:int array -> - Owl_types.padding -> - int array -> - int array -> - Init.typ -> - neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/DilatedConv1D/index.html b/owl/Owl_neural/D/Graph/Neuron/DilatedConv1D/index.html deleted file mode 100644 index 5895a8730..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/DilatedConv1D/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -DilatedConv1D (owl.Owl_neural.D.Graph.Neuron.DilatedConv1D)

Module Neuron.DilatedConv1D

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.DilatedConv1D.neuron_typ = - {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}
val create : - ?inputs:int array -> - Owl_types.padding -> - int array -> - int array -> - int array -> - Init.typ -> - neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/DilatedConv2D/index.html b/owl/Owl_neural/D/Graph/Neuron/DilatedConv2D/index.html deleted file mode 100644 index 7ff90156a..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/DilatedConv2D/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -DilatedConv2D (owl.Owl_neural.D.Graph.Neuron.DilatedConv2D)

Module Neuron.DilatedConv2D

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.DilatedConv2D.neuron_typ = - {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}
val create : - ?inputs:int array -> - Owl_types.padding -> - int array -> - int array -> - int array -> - Init.typ -> - neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/DilatedConv3D/index.html b/owl/Owl_neural/D/Graph/Neuron/DilatedConv3D/index.html deleted file mode 100644 index 2fca0dc79..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/DilatedConv3D/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -DilatedConv3D (owl.Owl_neural.D.Graph.Neuron.DilatedConv3D)

Module Neuron.DilatedConv3D

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.DilatedConv3D.neuron_typ = - {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}
val create : - ?inputs:int array -> - Owl_types.padding -> - int array -> - int array -> - int array -> - Init.typ -> - neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Dot/index.html b/owl/Owl_neural/D/Graph/Neuron/Dot/index.html deleted file mode 100644 index ed1cf667c..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Dot/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Dot (owl.Owl_neural.D.Graph.Neuron.Dot)

Module Neuron.Dot

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Dot.neuron_typ = - {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Dropout/index.html b/owl/Owl_neural/D/Graph/Neuron/Dropout/index.html deleted file mode 100644 index 10feb6474..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Dropout/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Dropout (owl.Owl_neural.D.Graph.Neuron.Dropout)

Module Neuron.Dropout

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Dropout.neuron_typ = - {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : float -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Embedding/index.html b/owl/Owl_neural/D/Graph/Neuron/Embedding/index.html deleted file mode 100644 index 15a901035..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Embedding/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Embedding (owl.Owl_neural.D.Graph.Neuron.Embedding)

Module Neuron.Embedding

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Embedding.neuron_typ = - {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable init_typ : Init.typ;
  3. mutable in_dim : int;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : ?inputs:int -> int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Flatten/index.html b/owl/Owl_neural/D/Graph/Neuron/Flatten/index.html deleted file mode 100644 index 91acc23bc..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Flatten/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Flatten (owl.Owl_neural.D.Graph.Neuron.Flatten)

Module Neuron.Flatten

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Flatten.neuron_typ = - {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/FullyConnected/index.html b/owl/Owl_neural/D/Graph/Neuron/FullyConnected/index.html deleted file mode 100644 index 02b31db80..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/FullyConnected/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -FullyConnected (owl.Owl_neural.D.Graph.Neuron.FullyConnected)

Module Neuron.FullyConnected

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.FullyConnected.neuron_typ = - {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable init_typ : Init.typ;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/GRU/index.html b/owl/Owl_neural/D/Graph/Neuron/GRU/index.html deleted file mode 100644 index 6a026572a..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/GRU/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GRU (owl.Owl_neural.D.Graph.Neuron.GRU)

Module Neuron.GRU

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.GRU.neuron_typ = - {
  1. mutable wxz : Optimise.Algodiff.t;
  2. mutable whz : Optimise.Algodiff.t;
  3. mutable wxr : Optimise.Algodiff.t;
  4. mutable whr : Optimise.Algodiff.t;
  5. mutable wxh : Optimise.Algodiff.t;
  6. mutable whh : Optimise.Algodiff.t;
  7. mutable bz : Optimise.Algodiff.t;
  8. mutable br : Optimise.Algodiff.t;
  9. mutable bh : Optimise.Algodiff.t;
  10. mutable h : Optimise.Algodiff.t;
  11. mutable init_typ : Init.typ;
  12. mutable in_shape : int array;
  13. mutable out_shape : int array;
}
val create : ?time_steps:int -> ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/GaussianDropout/index.html b/owl/Owl_neural/D/Graph/Neuron/GaussianDropout/index.html deleted file mode 100644 index a6d875c41..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/GaussianDropout/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GaussianDropout (owl.Owl_neural.D.Graph.Neuron.GaussianDropout)

Module Neuron.GaussianDropout

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.GaussianDropout.neuron_typ = - {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : float -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/GaussianNoise/index.html b/owl/Owl_neural/D/Graph/Neuron/GaussianNoise/index.html deleted file mode 100644 index 4d1582cfc..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/GaussianNoise/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GaussianNoise (owl.Owl_neural.D.Graph.Neuron.GaussianNoise)

Module Neuron.GaussianNoise

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.GaussianNoise.neuron_typ = - {
  1. mutable sigma : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : float -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/GlobalAvgPool1D/index.html b/owl/Owl_neural/D/Graph/Neuron/GlobalAvgPool1D/index.html deleted file mode 100644 index a1bd45a07..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/GlobalAvgPool1D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GlobalAvgPool1D (owl.Owl_neural.D.Graph.Neuron.GlobalAvgPool1D)

Module Neuron.GlobalAvgPool1D

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.GlobalAvgPool1D.neuron_typ = - {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/GlobalAvgPool2D/index.html b/owl/Owl_neural/D/Graph/Neuron/GlobalAvgPool2D/index.html deleted file mode 100644 index e2504523b..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/GlobalAvgPool2D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GlobalAvgPool2D (owl.Owl_neural.D.Graph.Neuron.GlobalAvgPool2D)

Module Neuron.GlobalAvgPool2D

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.GlobalAvgPool2D.neuron_typ = - {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/GlobalMaxPool1D/index.html b/owl/Owl_neural/D/Graph/Neuron/GlobalMaxPool1D/index.html deleted file mode 100644 index 84355e393..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/GlobalMaxPool1D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GlobalMaxPool1D (owl.Owl_neural.D.Graph.Neuron.GlobalMaxPool1D)

Module Neuron.GlobalMaxPool1D

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.GlobalMaxPool1D.neuron_typ = - {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/GlobalMaxPool2D/index.html b/owl/Owl_neural/D/Graph/Neuron/GlobalMaxPool2D/index.html deleted file mode 100644 index 679801f6f..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/GlobalMaxPool2D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GlobalMaxPool2D (owl.Owl_neural.D.Graph.Neuron.GlobalMaxPool2D)

Module Neuron.GlobalMaxPool2D

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.GlobalMaxPool2D.neuron_typ = - {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Init/index.html b/owl/Owl_neural/D/Graph/Neuron/Init/index.html deleted file mode 100644 index 02c56ef81..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Init/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Init (owl.Owl_neural.D.Graph.Neuron.Init)

Module Neuron.Init

type typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Init.typ = -
  1. | Uniform of float * float
  2. | Gaussian of float * float
  3. | Standard
  4. | Tanh
  5. | GlorotNormal
  6. | GlorotUniform
  7. | LecunNormal
  8. | HeNormal
  9. | Custom of int array -> Optimise.Algodiff.t
val calc_fans : int array -> float * float
val run : typ -> int array -> Optimise.Algodiff.t -> Optimise.Algodiff.t
val to_string : typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Input/index.html b/owl/Owl_neural/D/Graph/Neuron/Input/index.html deleted file mode 100644 index 4a4136262..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Input/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Input (owl.Owl_neural.D.Graph.Neuron.Input)

Module Neuron.Input

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Input.neuron_typ = - {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : int array -> neuron_typ
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/LSTM/index.html b/owl/Owl_neural/D/Graph/Neuron/LSTM/index.html deleted file mode 100644 index c4c54236a..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/LSTM/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -LSTM (owl.Owl_neural.D.Graph.Neuron.LSTM)

Module Neuron.LSTM

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.LSTM.neuron_typ = - {
  1. mutable wxi : Optimise.Algodiff.t;
  2. mutable whi : Optimise.Algodiff.t;
  3. mutable wxc : Optimise.Algodiff.t;
  4. mutable whc : Optimise.Algodiff.t;
  5. mutable wxf : Optimise.Algodiff.t;
  6. mutable whf : Optimise.Algodiff.t;
  7. mutable wxo : Optimise.Algodiff.t;
  8. mutable who : Optimise.Algodiff.t;
  9. mutable bi : Optimise.Algodiff.t;
  10. mutable bc : Optimise.Algodiff.t;
  11. mutable bf : Optimise.Algodiff.t;
  12. mutable bo : Optimise.Algodiff.t;
  13. mutable c : Optimise.Algodiff.t;
  14. mutable h : Optimise.Algodiff.t;
  15. mutable init_typ : Init.typ;
  16. mutable in_shape : int array;
  17. mutable out_shape : int array;
}
val create : ?time_steps:int -> ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Lambda/index.html b/owl/Owl_neural/D/Graph/Neuron/Lambda/index.html deleted file mode 100644 index 4c1a957a1..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Lambda/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Lambda (owl.Owl_neural.D.Graph.Neuron.Lambda)

Module Neuron.Lambda

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Lambda.neuron_typ = - {
  1. mutable lambda : Optimise.Algodiff.t -> Optimise.Algodiff.t;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : - ?out_shape:int array -> - (Optimise.Algodiff.t -> Optimise.Algodiff.t) -> - neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/LambdaArray/index.html b/owl/Owl_neural/D/Graph/Neuron/LambdaArray/index.html deleted file mode 100644 index f51d199fb..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/LambdaArray/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -LambdaArray (owl.Owl_neural.D.Graph.Neuron.LambdaArray)

Module Neuron.LambdaArray

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.LambdaArray.neuron_typ = - {
  1. mutable lambda : Optimise.Algodiff.t array -> Optimise.Algodiff.t;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : - int array -> - (Optimise.Algodiff.t array -> Optimise.Algodiff.t) -> - neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Linear/index.html b/owl/Owl_neural/D/Graph/Neuron/Linear/index.html deleted file mode 100644 index 5982661ff..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Linear/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Linear (owl.Owl_neural.D.Graph.Neuron.Linear)

Module Neuron.Linear

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Linear.neuron_typ = - {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable init_typ : Init.typ;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/LinearNoBias/index.html b/owl/Owl_neural/D/Graph/Neuron/LinearNoBias/index.html deleted file mode 100644 index aefef8fba..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/LinearNoBias/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -LinearNoBias (owl.Owl_neural.D.Graph.Neuron.LinearNoBias)

Module Neuron.LinearNoBias

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.LinearNoBias.neuron_typ = - {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable init_typ : Init.typ;
  3. mutable in_shape : int array;
  4. mutable out_shape : int array;
}
val create : ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Masking/index.html b/owl/Owl_neural/D/Graph/Neuron/Masking/index.html deleted file mode 100644 index b59af75b1..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Masking/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Masking (owl.Owl_neural.D.Graph.Neuron.Masking)

Module Neuron.Masking

\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Max/index.html b/owl/Owl_neural/D/Graph/Neuron/Max/index.html deleted file mode 100644 index fc92080f4..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Max/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Max (owl.Owl_neural.D.Graph.Neuron.Max)

Module Neuron.Max

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Max.neuron_typ = - {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/MaxPool1D/index.html b/owl/Owl_neural/D/Graph/Neuron/MaxPool1D/index.html deleted file mode 100644 index 051041bda..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/MaxPool1D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -MaxPool1D (owl.Owl_neural.D.Graph.Neuron.MaxPool1D)

Module Neuron.MaxPool1D

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.MaxPool1D.neuron_typ = - {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : Owl_types.padding -> int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/MaxPool2D/index.html b/owl/Owl_neural/D/Graph/Neuron/MaxPool2D/index.html deleted file mode 100644 index 13cc7ac35..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/MaxPool2D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -MaxPool2D (owl.Owl_neural.D.Graph.Neuron.MaxPool2D)

Module Neuron.MaxPool2D

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.MaxPool2D.neuron_typ = - {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : Owl_types.padding -> int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Mul/index.html b/owl/Owl_neural/D/Graph/Neuron/Mul/index.html deleted file mode 100644 index f30f8095f..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Mul/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Mul (owl.Owl_neural.D.Graph.Neuron.Mul)

Module Neuron.Mul

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Mul.neuron_typ = - {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Normalisation/index.html b/owl/Owl_neural/D/Graph/Neuron/Normalisation/index.html deleted file mode 100644 index 98906ae1e..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Normalisation/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -Normalisation (owl.Owl_neural.D.Graph.Neuron.Normalisation)

Module Neuron.Normalisation

type neuron_typ = - Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Normalisation.neuron_typ = - {
  1. mutable axis : int;
  2. mutable beta : Optimise.Algodiff.t;
  3. mutable gamma : Optimise.Algodiff.t;
  4. mutable mu : Optimise.Algodiff.t;
  5. mutable var : Optimise.Algodiff.t;
  6. mutable decay : Optimise.Algodiff.t;
  7. mutable training : bool;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}
val create : - ?training:bool -> - ?decay:float -> - ?mu:Optimise.Algodiff.A.arr -> - ?var:Optimise.Algodiff.A.arr -> - int -> - neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val load_weights : neuron_typ -> Optimise.Algodiff.t array -> unit
val save_weights : neuron_typ -> Optimise.Algodiff.t array
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/A/Linalg/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/A/Linalg/index.html deleted file mode 100644 index 9433eed27..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl.Owl_neural.D.Graph.Neuron.Optimise.Algodiff.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : - ?solver:[ `bilinear | `default | `direct ] -> - arr -> - arr -> - arr
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/A/Mat/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/A/Mat/index.html deleted file mode 100644 index 5f5c8efd7..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_neural.D.Graph.Neuron.Optimise.Algodiff.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/A/Scalar/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/A/Scalar/index.html deleted file mode 100644 index 32c6bf098..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl.Owl_neural.D.Graph.Neuron.Optimise.Algodiff.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/A/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/A/index.html deleted file mode 100644 index a40f5634f..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/A/index.html +++ /dev/null @@ -1,160 +0,0 @@ - -A (owl.Owl_neural.D.Graph.Neuron.Optimise.Algodiff.A)

Module Algodiff.A

val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
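A hedged sketch of the ndarray half of this signature, assuming the module is bound to A and that elt is float in this double-precision instance:

  let () =
    let one = A.float_to_elt 1. in
    let x = A.sequential ~a:one [| 2; 3 |] in     (* 2x3 array holding 1..6 *)
    let s = A.sum ~axis:1 x in                    (* row sums, shape [|2; 1|] *)
    Printf.printf "total = %g\n" (A.elt_to_float (A.sum' x));
    A.print s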
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Arr/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Arr/index.html deleted file mode 100644 index e61735791..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Arr/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Arr (owl.Owl_neural.D.Graph.Neuron.Optimise.Algodiff.Arr)

Module Algodiff.Arr

val empty : int array -> t
val zeros : int array -> t
val ones : int array -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int array -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int array -> t
val shape : t -> int array
val numel : t -> int
val reset : t -> unit
val reshape : t -> int array -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Builder/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Builder/index.html deleted file mode 100644 index 832b3d906..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Builder/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Builder (owl.Owl_neural.D.Graph.Neuron.Optimise.Algodiff.Builder)

Module Algodiff.Builder

module type Siso = sig ... end
val build_siso : (module Siso) -> t -> t
module type Sipo = sig ... end
val build_sipo : (module Sipo) -> t -> t * t
module type Sito = sig ... end
val build_sito : (module Sito) -> t -> t * t * t
module type Siao = sig ... end
val build_siao : (module Siao) -> t -> t array
module type Piso = sig ... end
val build_piso : (module Piso) -> t -> t -> t
module type Aiso = sig ... end
val build_aiso : (module Aiso) -> t array -> t
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Aiso/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Aiso/index.html deleted file mode 100644 index 8770e4f42..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl.Owl_neural.D.Graph.Neuron.Optimise.Algodiff.Builder.Aiso)

Module type Builder.Aiso

val label : string
val ff : t array -> t
val df : int list -> t -> t array -> t array -> t
val dr : int list -> t array -> t -> t Stdlib.ref -> t list
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Piso/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Piso/index.html deleted file mode 100644 index 200cb5e9a..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl.Owl_neural.D.Graph.Neuron.Optimise.Algodiff.Builder.Piso)

Module type Builder.Piso

val label : string
val ff_aa : A.elt -> A.elt -> t
val ff_ab : A.elt -> A.arr -> t
val ff_ba : A.arr -> A.elt -> t
val ff_bb : A.arr -> A.arr -> t
val df_da : t -> t -> t -> t -> t
val df_db : t -> t -> t -> t -> t
val df_dab : t -> t -> t -> t -> t -> t
val dr_ab : t -> t -> t -> t Stdlib.ref -> t * t
val dr_a : t -> t -> t -> t Stdlib.ref -> t
val dr_b : t -> t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siao/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siao/index.html deleted file mode 100644 index fa3266bf0..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siao/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siao (owl.Owl_neural.D.Graph.Neuron.Optimise.Algodiff.Builder.Siao)

Module type Builder.Siao

val label : string
val ff_f : A.elt -> t array
val ff_arr : A.arr -> t array
val df : t array -> t -> t -> t array
val dr : t -> t -> t Stdlib.ref array -> t Stdlib.ref array -> t
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sipo/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sipo/index.html deleted file mode 100644 index dd9d07816..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl.Owl_neural.D.Graph.Neuron.Optimise.Algodiff.Builder.Sipo)

Module type Builder.Sipo

val label : string
val ff_f : A.elt -> t * t
val ff_arr : A.arr -> t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siso/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siso/index.html deleted file mode 100644 index 494254633..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl.Owl_neural.D.Graph.Neuron.Optimise.Algodiff.Builder.Siso)

Module type Builder.Siso

val label : string
val ff_f : A.elt -> t
val ff_arr : A.arr -> t
val df : t -> t -> t -> t
val dr : t -> t -> t Stdlib.ref -> t
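To show how a Siso implementation plugs into Builder.build_siso, a hedged sketch of a custom cube operator. It is written against Owl.Algodiff.D, which is assumed to expose this same Builder signature, and the df/dr argument orders (output primal, input primal, tangent; input, output primal, adjoint ref) follow the conventional Owl layout rather than anything stated on this page:

  module AD = Owl.Algodiff.D

  let cube =
    let module C = struct
      let label = "cube"
      let ff_f a = AD.F (AD.A.Scalar.mul a (AD.A.Scalar.sqr a))
      let ff_arr a = AD.Arr (AD.A.mul a (AD.A.sqr a))
      (* forward tangent: d(x^3) = 3 * x^2 * dx *)
      let df _cp ap at = AD.Maths.(AD._f 3. * sqr ap * at)
      (* reverse adjoint: 3 * x^2 times the adjoint of the output *)
      let dr a _cp ca = AD.Maths.(!ca * AD._f 3. * sqr a)
    end
    in
    AD.Builder.build_siso (module C : AD.Builder.Siso)

With that in place, AD.diff cube (AD._f 2.) should unpack to 12., matching the analytic derivative 3 * 2^2.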
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sito/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sito/index.html deleted file mode 100644 index 308280e7c..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl.Owl_neural.D.Graph.Neuron.Optimise.Algodiff.Builder.Sito)

Module type Builder.Sito

val label : string
val ff_f : A.elt -> t * t * t
val ff_arr : A.arr -> t * t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Linalg/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Linalg/index.html deleted file mode 100644 index f0f60381e..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl.Owl_neural.D.Graph.Neuron.Optimise.Algodiff.Linalg)

Module Algodiff.Linalg

val inv : t -> t
val logdet : t -> t
val chol : ?upper:bool -> t -> t
val qr : t -> t * t
val lq : t -> t * t
val svd : ?thin:bool -> t -> t * t * t
val sylvester : t -> t -> t -> t
val lyapunov : t -> t -> t
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> t -> t -> t
val (/@) : t -> t -> t
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> t -> t -> t
val care : ?diag_r:bool -> t -> t -> t -> t -> t
val dare : ?diag_r:bool -> t -> t -> t -> t -> t
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Mat/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Mat/index.html deleted file mode 100644 index 036e428a8..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_neural.D.Graph.Neuron.Optimise.Algodiff.Mat)

Module Algodiff.Mat

val empty : int -> int -> t
val zeros : int -> int -> t
val eye : int -> t
val ones : int -> int -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int -> int -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int -> int -> t
val shape : t -> int * int
val numel : t -> int
val row_num : t -> int
val col_num : t -> int
val reset : t -> unit
val reshape : int -> int -> t -> t
val get : t -> int -> int -> t
val set : t -> int -> int -> t -> t
val row : t -> int -> t
val mean : t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
val map_by_row : (t -> t) -> t -> t
val of_arrays : A.elt array array -> t
val init_2d : int -> int -> (int -> int -> t) -> t
val print : t -> unit
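A hedged sketch assuming this Mat module is in scope (for example as Algodiff.Mat); the sizes are illustrative:

  let () =
    let a = Mat.uniform 3 3 in
    let b = Mat.eye 3 in
    (* multiplying by the identity should reproduce a *)
    Mat.print (Mat.dot a b)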
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Maths/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Maths/index.html deleted file mode 100644 index 483d762f3..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/Maths/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Maths (owl.Owl_neural.D.Graph.Neuron.Optimise.Algodiff.Maths)

Module Algodiff.Maths

val (+) : t -> t -> t
val (-) : t -> t -> t
val (*) : t -> t -> t
val (/) : t -> t -> t
val (*@) : t -> t -> t
val (**) : t -> t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val kron : t -> t -> t
val dot : t -> t -> t
val pow : t -> t -> t
val atan2 : t -> t -> t
val min2 : t -> t -> t
val max2 : t -> t -> t
val cross_entropy : t -> t -> t
val inv : t -> t
val neg : t -> t
val abs : t -> t
val signum : t -> t
val floor : t -> t
val ceil : t -> t
val round : t -> t
val sqr : t -> t
val sqrt : t -> t
val log : t -> t
val log2 : t -> t
val log10 : t -> t
val exp : t -> t
val sin : t -> t
val cos : t -> t
val tan : t -> t
val sinh : t -> t
val cosh : t -> t
val tanh : t -> t
val asin : t -> t
val acos : t -> t
val atan : t -> t
val asinh : t -> t
val acosh : t -> t
val atanh : t -> t
val sum' : t -> t
val log_sum_exp' : t -> t
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> t -> t
val sum : ?axis:int -> ?keep_dims:bool -> t -> t
val sum_reduce : ?axis:int array -> t -> t
val mean : t -> t
val transpose : ?axis:int array -> t -> t
val swap : int -> int -> t -> t
val l1norm' : t -> t
val l2norm' : t -> t
val l2norm_sqr' : t -> t
val sigmoid : t -> t
val relu : t -> t
val dawsn : t -> t
val softplus : t -> t
val softsign : t -> t
val softmax : ?axis:int -> t -> t
val reshape : t -> int array -> t
val flatten : t -> t
val get_item : t -> int -> int -> t
val get_row : t -> int -> t
val concat : axis:int -> t -> t -> t
val split : axis:int -> int array -> t -> t array
val of_arrays : t array array -> t
val to_arrays : t -> t array array
val concatenate : axis:int -> t array -> t
val stack : axis:int -> t array -> t
val get_slice : int list list -> t -> t
val set_slice : int list list -> t -> t -> t
val get_fancy : Owl_types.index list -> t -> t
val set_fancy : Owl_types.index list -> t -> t -> t
val diag : ?k:int -> t -> t
val diagm : ?k:int -> t -> t
val trace : t -> t
val triu : ?k:int -> t -> t
val tril : ?k:int -> t -> t
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/NN/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/NN/index.html deleted file mode 100644 index 52fe71191..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/NN/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -NN (owl.Owl_neural.D.Graph.Neuron.Optimise.Algodiff.NN)

Module Algodiff.NN

val dropout : ?rate:float -> t -> t
val conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val dilated_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val dilated_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val dilated_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val transpose_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val transpose_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val transpose_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val max_pool1d : Owl_types.padding -> t -> int array -> int array -> t
val max_pool2d : Owl_types.padding -> t -> int array -> int array -> t
val max_pool3d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool1d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool2d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool3d : Owl_types.padding -> t -> int array -> int array -> t
val upsampling2d : t -> int array -> t
val pad : ?v:A.elt -> int list list -> t -> t
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/index.html deleted file mode 100644 index 4c5d95841..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Algodiff/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Algodiff (owl.Owl_neural.D.Graph.Neuron.Optimise.Algodiff)

Module Optimise.Algodiff

module A : sig ... end
type t = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Optimise.Algodiff.t =
  1. | F of A.elt
  2. | Arr of A.arr
  3. | DF of t * t * int
  4. | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
val tag : unit -> int
val primal : t -> t
val primal' : t -> t
val zero : t -> t
val reset_zero : t -> t
val tangent : t -> t
val adjref : t -> t Stdlib.ref
val adjval : t -> t
val shape : t -> int array
val is_float : t -> bool
val is_arr : t -> bool
val row_num : t -> int
val col_num : t -> int
val numel : t -> int
val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t
val clip_by_l2norm : A.elt -> t -> t
val copy_primal' : t -> t
val tile : t -> int array -> t
val repeat : t -> int array -> t
val pack_elt : A.elt -> t
val unpack_elt : t -> A.elt
val pack_flt : float -> t
val _f : float -> t
val unpack_flt : t -> float
val pack_arr : A.arr -> t
val unpack_arr : t -> A.arr
val deep_info : t -> string
val type_info : t -> string
val error_binop : string -> t -> t -> 'a
val error_uniop : string -> t -> 'a
val make_forward : t -> t -> int -> t
val make_reverse : t -> int -> t
val reverse_prop : t -> t -> unit
val diff : (t -> t) -> t -> t
val diff' : (t -> t) -> t -> t * t
val grad : (t -> t) -> t -> t
val grad' : (t -> t) -> t -> t * t
val jacobian : (t -> t) -> t -> t
val jacobian' : (t -> t) -> t -> t * t
val jacobianv : (t -> t) -> t -> t -> t
val jacobianv' : (t -> t) -> t -> t -> t * t
val jacobianTv : (t -> t) -> t -> t -> t
val jacobianTv' : (t -> t) -> t -> t -> t * t
val hessian : (t -> t) -> t -> t
val hessian' : (t -> t) -> t -> t * t
val hessianv : (t -> t) -> t -> t -> t
val hessianv' : (t -> t) -> t -> t -> t * t
val laplacian : (t -> t) -> t -> t
val laplacian' : (t -> t) -> t -> t * t
val gradhessian : (t -> t) -> t -> t * t
val gradhessian' : (t -> t) -> t -> t * t * t
val gradhessianv : (t -> t) -> t -> t -> t * t
val gradhessianv' : (t -> t) -> t -> t -> t * t * t
module Builder : sig ... end
module Maths : sig ... end
module Linalg : sig ... end
module NN : sig ... end
module Mat : sig ... end
module Arr : sig ... end
val to_trace : t list -> string
val to_dot : t list -> string
val pp_num : Stdlib.Format.formatter -> t -> unit
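Taken together, the differentiation operators above are typically driven as in this hedged sketch; Owl.Algodiff.D is assumed to match this signature, and the function is illustrative:

  module AD = Owl.Algodiff.D

  (* f : R -> R, f x = sin x * cos x *)
  let f x = AD.Maths.(sin x * cos x)

  let () =
    let x = AD.pack_flt 1.0 in
    let y, dy = AD.diff' f x in   (* primal value and first derivative *)
    Printf.printf "f(1) = %g, f'(1) = %g\n" (AD.unpack_flt y) (AD.unpack_flt dy)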
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Batch/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Batch/index.html deleted file mode 100644 index 2c21cd166..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Batch/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Batch (owl.Owl_neural.D.Graph.Neuron.Optimise.Batch)

Module Optimise.Batch

type typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Optimise.Batch.typ =
  1. | Full
  2. | Mini of int
  3. | Sample of int
  4. | Stochastic
val run : typ -> Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t
val batches : typ -> Algodiff.t -> int
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Checkpoint/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Checkpoint/index.html deleted file mode 100644 index 1ebb5bff3..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Checkpoint/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Checkpoint (owl.Owl_neural.D.Graph.Neuron.Optimise.Checkpoint)

Module Optimise.Checkpoint

type state = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Optimise.Checkpoint.state = {
  1. mutable current_batch : int;
  2. mutable batches_per_epoch : int;
  3. mutable epochs : float;
  4. mutable batches : int;
  5. mutable loss : Algodiff.t array;
  6. mutable start_at : float;
  7. mutable stop : bool;
  8. mutable gs : Algodiff.t array array;
  9. mutable ps : Algodiff.t array array;
  10. mutable us : Algodiff.t array array;
  11. mutable ch : Algodiff.t array array array;
}
type typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Optimise.Checkpoint.typ =
  1. | Batch of int
  2. | Epoch of float
  3. | Custom of state -> unit
  4. | None
val init_state : int -> float -> state
val default_checkpoint_fun : (string -> 'a) -> 'a
val print_state_info : state -> unit
val print_summary : state -> unit
val run : typ -> (string -> unit) -> int -> Algodiff.t -> state -> unit
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Clipping/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Clipping/index.html deleted file mode 100644 index 70054638d..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Clipping/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Clipping (owl.Owl_neural.D.Graph.Neuron.Optimise.Clipping)

Module Optimise.Clipping

type typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Optimise.Clipping.typ =
  1. | L2norm of float
  2. | Value of float * float
  3. | None
val run : typ -> Algodiff.t -> Algodiff.t
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Gradient/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Gradient/index.html deleted file mode 100644 index 43cd85876..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Gradient/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -Gradient (owl.Owl_neural.D.Graph.Neuron.Optimise.Gradient)

Module Optimise.Gradient

type typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Optimise.Gradient.typ =
  1. | GD
  2. | CG
  3. | CD
  4. | NonlinearCG
  5. | DaiYuanCG
  6. | NewtonCG
  7. | Newton
val run : typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Learning_Rate/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Learning_Rate/index.html deleted file mode 100644 index ed1b49442..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Learning_Rate/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Learning_Rate (owl.Owl_neural.D.Graph.Neuron.Optimise.Learning_Rate)

Module Optimise.Learning_Rate

type typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Optimise.Learning_Rate.typ =
  1. | Adagrad of float
  2. | Const of float
  3. | Decay of float * float
  4. | Exp_decay of float * float
  5. | RMSprop of float * float
  6. | Adam of float * float * float
  7. | Schedule of float array
val run : typ -> int -> Algodiff.t -> Algodiff.t array -> Algodiff.t
val default : typ -> typ
val update_ch : typ -> Algodiff.t -> Algodiff.t array -> Algodiff.t array
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Loss/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Loss/index.html deleted file mode 100644 index e33230f11..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Loss/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Loss (owl.Owl_neural.D.Graph.Neuron.Optimise.Loss)

Module Optimise.Loss

type typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Optimise.Loss.typ =
  1. | Hinge
  2. | L1norm
  3. | L2norm
  4. | Quadratic
  5. | Cross_entropy
  6. | Custom of Algodiff.t -> Algodiff.t -> Algodiff.t
val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Momentum/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Momentum/index.html deleted file mode 100644 index 1f2ec651a..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Momentum/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Momentum (owl.Owl_neural.D.Graph.Neuron.Optimise.Momentum)

Module Optimise.Momentum

type typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Optimise.Momentum.typ =
  1. | Standard of float
  2. | Nesterov of float
  3. | None
val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Params/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Params/index.html deleted file mode 100644 index 5b9885de6..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Params/index.html +++ /dev/null @@ -1,16 +0,0 @@ - -Params (owl.Owl_neural.D.Graph.Neuron.Optimise.Params)

Module Optimise.Params

type typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Optimise.Params.typ = {
  1. mutable epochs : float;
  2. mutable batch : Batch.typ;
  3. mutable gradient : Gradient.typ;
  4. mutable loss : Loss.typ;
  5. mutable learning_rate : Learning_Rate.typ;
  6. mutable regularisation : Regularisation.typ;
  7. mutable momentum : Momentum.typ;
  8. mutable clipping : Clipping.typ;
  9. mutable stopping : Stopping.typ;
  10. mutable checkpoint : Checkpoint.typ;
  11. mutable verbosity : bool;
}
val default : unit -> typ
val config : ?batch:Batch.typ -> ?gradient:Gradient.typ -> ?loss:Loss.typ -> ?learning_rate:Learning_Rate.typ -> ?regularisation:Regularisation.typ -> ?momentum:Momentum.typ -> ?clipping:Clipping.typ -> ?stopping:Stopping.typ -> ?checkpoint:Checkpoint.typ -> ?verbosity:bool -> float -> typ
val to_string : typ -> string
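A hedged configuration sketch built from the constructors listed in the sibling modules, assuming the enclosing Optimise module is open; the numeric values are illustrative, not taken from the original page:

  let params =
    Params.config
      ~batch:(Batch.Mini 128)
      ~loss:Loss.Cross_entropy
      ~learning_rate:(Learning_Rate.Adam (0.001, 0.9, 0.999))
      ~stopping:(Stopping.Const 1e-6)
      10.   (* number of epochs *)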
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Regularisation/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Regularisation/index.html deleted file mode 100644 index 022d96dac..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Regularisation/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Regularisation (owl.Owl_neural.D.Graph.Neuron.Optimise.Regularisation)

Module Optimise.Regularisation

type typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Optimise.Regularisation.typ =
  1. | L1norm of float
  2. | L2norm of float
  3. | Elastic_net of float * float
  4. | None
val run : typ -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Stopping/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Stopping/index.html deleted file mode 100644 index 1ac5a60e2..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Stopping/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Stopping (owl.Owl_neural.D.Graph.Neuron.Optimise.Stopping)

Module Optimise.Stopping

type typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Optimise.Stopping.typ =
  1. | Const of float
  2. | Early of int * int
  3. | None
val run : typ -> float -> bool
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/Utils/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/Utils/index.html deleted file mode 100644 index b0a3d6018..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/Utils/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Utils (owl.Owl_neural.D.Graph.Neuron.Optimise.Utils)

Module Optimise.Utils

val sample_num : Algodiff.t -> int
val draw_samples : Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t
val get_chunk : Algodiff.t -> Algodiff.t -> int -> int -> Algodiff.t * Algodiff.t
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Optimise/index.html b/owl/Owl_neural/D/Graph/Neuron/Optimise/index.html deleted file mode 100644 index 8d4bf8c7f..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Optimise/index.html +++ /dev/null @@ -1,31 +0,0 @@ - -Optimise (owl.Owl_neural.D.Graph.Neuron.Optimise)

Module Neuron.Optimise

module Algodiff : sig ... end
module Utils : sig ... end
module Learning_Rate : sig ... end
module Batch : sig ... end
module Loss : sig ... end
module Gradient : sig ... end
module Momentum : sig ... end
module Regularisation : sig ... end
module Clipping : sig ... end
module Stopping : sig ... end
module Checkpoint : sig ... end
module Params : sig ... end
val minimise_weight : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Checkpoint.state * Algodiff.t
val minimise_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t * Algodiff.t array array) -> (Algodiff.t -> Algodiff.t array array * Algodiff.t array array) -> (Algodiff.t array array -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state
val minimise_fun : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Checkpoint.state * Algodiff.t
val minimise_compiled_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> (unit -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state
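For instance, minimise_fun can drive a plain scalar optimisation, as in this hedged sketch; it assumes the Optimise module is open and that the second component of the returned pair is the optimised argument:

  let () =
    let f x = Algodiff.Maths.(sqr (x - Algodiff._f 3.)) in
    let p = Params.config ~learning_rate:(Learning_Rate.Const 0.1) 100. in
    let _state, x_min = minimise_fun p f (Algodiff._f 0.) in
    Printf.printf "x_min ~ %g\n" (Algodiff.unpack_flt x_min)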
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Padding1D/index.html b/owl/Owl_neural/D/Graph/Neuron/Padding1D/index.html deleted file mode 100644 index 19d02abea..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Padding1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Padding1D (owl.Owl_neural.D.Graph.Neuron.Padding1D)

Module Neuron.Padding1D

\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Padding2D/index.html b/owl/Owl_neural/D/Graph/Neuron/Padding2D/index.html deleted file mode 100644 index 125abea0b..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Padding2D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Padding2D (owl.Owl_neural.D.Graph.Neuron.Padding2D)

Module Neuron.Padding2D

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Padding2D.neuron_typ = {
  1. mutable padding : int array array;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : int array array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Padding3D/index.html b/owl/Owl_neural/D/Graph/Neuron/Padding3D/index.html deleted file mode 100644 index 509d3c72e..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Padding3D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Padding3D (owl.Owl_neural.D.Graph.Neuron.Padding3D)

Module Neuron.Padding3D

\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Recurrent/index.html b/owl/Owl_neural/D/Graph/Neuron/Recurrent/index.html deleted file mode 100644 index fa9ef05ad..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Recurrent/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -Recurrent (owl.Owl_neural.D.Graph.Neuron.Recurrent)

Module Neuron.Recurrent

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Recurrent.neuron_typ = {
  1. mutable whh : Optimise.Algodiff.t;
  2. mutable wxh : Optimise.Algodiff.t;
  3. mutable why : Optimise.Algodiff.t;
  4. mutable bh : Optimise.Algodiff.t;
  5. mutable by : Optimise.Algodiff.t;
  6. mutable h : Optimise.Algodiff.t;
  7. mutable hiddens : int;
  8. mutable act : Activation.typ;
  9. mutable init_typ : Init.typ;
  10. mutable in_shape : int array;
  11. mutable out_shape : int array;
}
val create : ?time_steps:int -> ?inputs:int -> int -> int -> Activation.typ -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Reshape/index.html b/owl/Owl_neural/D/Graph/Neuron/Reshape/index.html deleted file mode 100644 index 6935ecbea..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Reshape/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Reshape (owl.Owl_neural.D.Graph.Neuron.Reshape)

Module Neuron.Reshape

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Reshape.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : ?inputs:int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/Slice/index.html b/owl/Owl_neural/D/Graph/Neuron/Slice/index.html deleted file mode 100644 index b0a2c05ab..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/Slice/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Slice (owl.Owl_neural.D.Graph.Neuron.Slice)

Module Neuron.Slice

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.Slice.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
  3. mutable slice : int list list;
}
val create : int list list -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/TransposeConv1D/index.html b/owl/Owl_neural/D/Graph/Neuron/TransposeConv1D/index.html deleted file mode 100644 index 2bb2fef04..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/TransposeConv1D/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -TransposeConv1D (owl.Owl_neural.D.Graph.Neuron.TransposeConv1D)

Module Neuron.TransposeConv1D

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.TransposeConv1D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/TransposeConv2D/index.html b/owl/Owl_neural/D/Graph/Neuron/TransposeConv2D/index.html deleted file mode 100644 index a529645b9..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/TransposeConv2D/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -TransposeConv2D (owl.Owl_neural.D.Graph.Neuron.TransposeConv2D)

Module Neuron.TransposeConv2D

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.TransposeConv2D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/TransposeConv3D/index.html b/owl/Owl_neural/D/Graph/Neuron/TransposeConv3D/index.html deleted file mode 100644 index 92a805373..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/TransposeConv3D/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -TransposeConv3D (owl.Owl_neural.D.Graph.Neuron.TransposeConv3D)

Module Neuron.TransposeConv3D

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.TransposeConv3D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/UpSampling1D/index.html b/owl/Owl_neural/D/Graph/Neuron/UpSampling1D/index.html deleted file mode 100644 index 49227fba7..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/UpSampling1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -UpSampling1D (owl.Owl_neural.D.Graph.Neuron.UpSampling1D)

Module Neuron.UpSampling1D

\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/UpSampling2D/index.html b/owl/Owl_neural/D/Graph/Neuron/UpSampling2D/index.html deleted file mode 100644 index facc1be80..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/UpSampling2D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -UpSampling2D (owl.Owl_neural.D.Graph.Neuron.UpSampling2D)

Module Neuron.UpSampling2D

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.UpSampling2D.neuron_typ = {
  1. mutable size : int array;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/UpSampling3D/index.html b/owl/Owl_neural/D/Graph/Neuron/UpSampling3D/index.html deleted file mode 100644 index 350e9d2b3..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/UpSampling3D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -UpSampling3D (owl.Owl_neural.D.Graph.Neuron.UpSampling3D)

Module Neuron.UpSampling3D

\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/Neuron/index.html b/owl/Owl_neural/D/Graph/Neuron/index.html deleted file mode 100644 index 93cc29759..000000000 --- a/owl/Owl_neural/D/Graph/Neuron/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Neuron (owl.Owl_neural.D.Graph.Neuron)

Module Graph.Neuron

module Optimise : sig ... end
module Init : sig ... end
module Input : sig ... end
module Activation : sig ... end
module Linear : sig ... end
module LinearNoBias : sig ... end
module Recurrent : sig ... end
module LSTM : sig ... end
module GRU : sig ... end
module Conv1D : sig ... end
module Conv2D : sig ... end
module Conv3D : sig ... end
module DilatedConv1D : sig ... end
module DilatedConv2D : sig ... end
module DilatedConv3D : sig ... end
module TransposeConv1D : sig ... end
module TransposeConv2D : sig ... end
module TransposeConv3D : sig ... end
module FullyConnected : sig ... end
module MaxPool1D : sig ... end
module MaxPool2D : sig ... end
module AvgPool1D : sig ... end
module AvgPool2D : sig ... end
module GlobalMaxPool1D : sig ... end
module GlobalMaxPool2D : sig ... end
module GlobalAvgPool1D : sig ... end
module GlobalAvgPool2D : sig ... end
module UpSampling1D : sig ... end
module UpSampling2D : sig ... end
module UpSampling3D : sig ... end
module Padding1D : sig ... end
module Padding2D : sig ... end
module Padding3D : sig ... end
module Lambda : sig ... end
module LambdaArray : sig ... end
module Dropout : sig ... end
module Reshape : sig ... end
module Flatten : sig ... end
module Slice : sig ... end
module Add : sig ... end
module Mul : sig ... end
module Dot : sig ... end
module Max : sig ... end
module Average : sig ... end
module Concatenate : sig ... end
module Normalisation : sig ... end
module GaussianNoise : sig ... end
module GaussianDropout : sig ... end
module AlphaDropout : sig ... end
module Embedding : sig ... end
module Masking : sig ... end
type neuron = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).Neuron.neuron =
  1. | Input of Input.neuron_typ
  2. | Linear of Linear.neuron_typ
  3. | LinearNoBias of LinearNoBias.neuron_typ
  4. | Embedding of Embedding.neuron_typ
  5. | LSTM of LSTM.neuron_typ
  6. | GRU of GRU.neuron_typ
  7. | Recurrent of Recurrent.neuron_typ
  8. | Conv1D of Conv1D.neuron_typ
  9. | Conv2D of Conv2D.neuron_typ
  10. | Conv3D of Conv3D.neuron_typ
  11. | DilatedConv1D of DilatedConv1D.neuron_typ
  12. | DilatedConv2D of DilatedConv2D.neuron_typ
  13. | DilatedConv3D of DilatedConv3D.neuron_typ
  14. | TransposeConv1D of TransposeConv1D.neuron_typ
  15. | TransposeConv2D of TransposeConv2D.neuron_typ
  16. | TransposeConv3D of TransposeConv3D.neuron_typ
  17. | FullyConnected of FullyConnected.neuron_typ
  18. | MaxPool1D of MaxPool1D.neuron_typ
  19. | MaxPool2D of MaxPool2D.neuron_typ
  20. | AvgPool1D of AvgPool1D.neuron_typ
  21. | AvgPool2D of AvgPool2D.neuron_typ
  22. | GlobalMaxPool1D of GlobalMaxPool1D.neuron_typ
  23. | GlobalMaxPool2D of GlobalMaxPool2D.neuron_typ
  24. | GlobalAvgPool1D of GlobalAvgPool1D.neuron_typ
  25. | GlobalAvgPool2D of GlobalAvgPool2D.neuron_typ
  26. | UpSampling2D of UpSampling2D.neuron_typ
  27. | Padding2D of Padding2D.neuron_typ
  28. | Dropout of Dropout.neuron_typ
  29. | Reshape of Reshape.neuron_typ
  30. | Flatten of Flatten.neuron_typ
  31. | Slice of Slice.neuron_typ
  32. | Lambda of Lambda.neuron_typ
  33. | LambdaArray of LambdaArray.neuron_typ
  34. | Activation of Activation.neuron_typ
  35. | GaussianNoise of GaussianNoise.neuron_typ
  36. | GaussianDropout of GaussianDropout.neuron_typ
  37. | AlphaDropout of AlphaDropout.neuron_typ
  38. | Normalisation of Normalisation.neuron_typ
  39. | Add of Add.neuron_typ
  40. | Mul of Mul.neuron_typ
  41. | Dot of Dot.neuron_typ
  42. | Max of Max.neuron_typ
  43. | Average of Average.neuron_typ
  44. | Concatenate of Concatenate.neuron_typ
val get_in_out_shape : neuron -> int array * int array
val get_in_shape : neuron -> int array
val get_out_shape : neuron -> int array
val connect : int array array -> neuron -> unit
val init : neuron -> unit
val reset : neuron -> unit
val mktag : int -> neuron -> unit
val mkpar : neuron -> Optimise.Algodiff.t array
val mkpri : neuron -> Optimise.Algodiff.t array
val mkadj : neuron -> Optimise.Algodiff.t array
val update : neuron -> Optimise.Algodiff.t array -> unit
val load_weights : neuron -> Optimise.Algodiff.t array -> unit
val save_weights : neuron -> Optimise.Algodiff.t array
val copy : neuron -> neuron
val to_string : neuron -> string
val to_name : neuron -> string
\ No newline at end of file diff --git a/owl/Owl_neural/D/Graph/index.html b/owl/Owl_neural/D/Graph/index.html deleted file mode 100644 index 1577abbb2..000000000 --- a/owl/Owl_neural/D/Graph/index.html +++ /dev/null @@ -1,245 +0,0 @@ - -Graph (owl.Owl_neural.D.Graph)

Module D.Graph

module Neuron : sig ... end
type node = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).node = {
  1. mutable name : string;
  2. mutable prev : node array;
  3. mutable next : node array;
  4. mutable neuron : Neuron.neuron;
  5. mutable output : Neuron.Optimise.Algodiff.t option;
  6. mutable network : network;
  7. mutable train : bool;
}
and network = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.D).network = {
  1. mutable nnid : string;
  2. mutable size : int;
  3. mutable roots : node array;
  4. mutable outputs : node array;
  5. mutable topo : node array;
}
val make_network : ?nnid:string -> int -> node array -> node array -> network
val make_node : ?name:string -> ?train:bool -> node array -> node array -> Neuron.neuron -> Neuron.Optimise.Algodiff.t option -> network -> node
val get_roots : network -> node array
val get_outputs : network -> node array
val get_node : network -> string -> node
val get_network : ?name:string -> node -> network
val outputs : ?name:string -> node array -> network
val get_network_name : network -> string
val set_network_name : network -> string -> unit
val collect_output : node array -> Neuron.Optimise.Algodiff.t array
val connect_pair : node -> node -> unit
val connect_to_parents : node array -> node -> unit
val add_node : ?act_typ:Neuron.Activation.typ -> network -> node array -> node -> node
val input_shape : network -> int array
val input_shapes : network -> int array array
val init : network -> unit
val reset : network -> unit
val mktag : int -> network -> unit
val mkpar : network -> Neuron.Optimise.Algodiff.t array array
val mkpri : network -> Neuron.Optimise.Algodiff.t array array
val mkadj : network -> Neuron.Optimise.Algodiff.t array array
val update : network -> Neuron.Optimise.Algodiff.t array array -> unit
val run_inputs : Neuron.Optimise.Algodiff.t array -> network -> Neuron.Optimise.Algodiff.t array
val forward_inputs : network -> Neuron.Optimise.Algodiff.t array -> Neuron.Optimise.Algodiff.t array * Neuron.Optimise.Algodiff.t array array
val backward : network -> Neuron.Optimise.Algodiff.t -> Neuron.Optimise.Algodiff.t array array * Neuron.Optimise.Algodiff.t array array
val copy : network -> network
val model_inputs : network -> Neuron.Optimise.Algodiff.A.arr array -> Neuron.Optimise.Algodiff.A.arr array
val input : ?name:string -> int array -> node
val inputs : ?names:string array -> int array array -> node array
val activation : ?name:string -> Neuron.Activation.typ -> node -> node
val linear : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> node -> node
val linear_nobias : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> node -> node
val embedding : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> int -> node -> node
val recurrent : ?name:string -> ?init_typ:Neuron.Init.typ -> act_typ:Neuron.Activation.typ -> int -> int -> node -> node
val lstm : ?name:string -> ?init_typ:Neuron.Init.typ -> int -> node -> node
val gru : ?name:string -> ?init_typ:Neuron.Init.typ -> int -> node -> node
val conv1d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val conv2d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val conv3d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val dilated_conv1d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> int array -> node -> node
val dilated_conv2d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> int array -> node -> node
val dilated_conv3d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> int array -> node -> node
val transpose_conv1d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val transpose_conv2d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val transpose_conv3d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val fully_connected : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> node -> node
val max_pool1d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val max_pool2d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val avg_pool1d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val avg_pool2d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val global_max_pool1d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node
val global_max_pool2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node
val global_avg_pool1d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node
val global_avg_pool2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node
val upsampling2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> int array -> node -> node
val padding2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> int array array -> node -> node
val dropout : ?name:string -> float -> node -> node
val gaussian_noise : ?name:string -> float -> node -> node
val gaussian_dropout : ?name:string -> float -> node -> node
val alpha_dropout : ?name:string -> float -> node -> node
val normalisation : ?name:string -> ?axis:int -> ?training:bool -> ?decay:float -> ?mu:Neuron.Optimise.Algodiff.A.arr -> ?var:Neuron.Optimise.Algodiff.A.arr -> node -> node
val reshape : ?name:string -> int array -> node -> node
val flatten : ?name:string -> node -> node
val slice : ?name:string -> int list list -> node -> node
val lambda : ?name:string -> ?act_typ:Neuron.Activation.typ -> ?out_shape:int array -> (Neuron.Optimise.Algodiff.t -> Neuron.Optimise.Algodiff.t) -> node -> node
val lambda_array : ?name:string -> ?act_typ:Neuron.Activation.typ -> int array -> (Neuron.Optimise.Algodiff.t array -> Neuron.Optimise.Algodiff.t) -> node array -> node
val add : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val mul : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val dot : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val max : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val average : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val concatenate : ?name:string -> ?act_typ:Neuron.Activation.typ -> int -> node array -> node
val to_string : network -> string
val pp_network : Stdlib.Format.formatter -> network -> unit
val print : network -> unit
val save : ?unsafe:bool -> network -> string -> unit
val load : string -> network
val save_weights : network -> string -> unit
val load_weights : network -> string -> unit
val make_subnetwork : ?copy:bool -> ?make_inputs:string array -> network -> string array -> network
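The constructors above are meant to be chained with |>, starting from an input node and finishing with get_network. A minimal sketch of that usage, assuming the conventional opens (open Owl, Neural.S, Neural.S.Graph) and that the sibling constructors input, conv2d, linear and get_network from this same Graph module are in scope; layer sizes are illustrative only:

open Owl
open Neural.S
open Neural.S.Graph

(* sketch: a small CNN assembled from the layer constructors documented above *)
let make_network () =
  input [| 28; 28; 1 |]
  |> conv2d ~act_typ:Activation.Relu [| 5; 5; 1; 32 |] [| 1; 1 |]
  |> max_pool2d [| 2; 2 |] [| 2; 2 |]
  |> dropout 0.1
  |> fully_connected ~act_typ:Activation.Relu 128
  |> linear ~act_typ:(Activation.Softmax 1) 10
  |> get_network

save and save_weights (above) then persist either the whole network or just its weights once training has produced something worth keeping.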
\ No newline at end of file diff --git a/owl/Owl_neural/D/index.html b/owl/Owl_neural/D/index.html deleted file mode 100644 index bd5861e08..000000000 --- a/owl/Owl_neural/D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -D (owl.Owl_neural.D)

Module Owl_neural.D

include sig ... end
module Graph : sig ... end
module Optimise = Graph.Neuron.Optimise
module Init = Graph.Neuron.Init
module Activation = Graph.Neuron.Activation
module Regularisation = Graph.Neuron.Optimise.Regularisation
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Activation/index.html b/owl/Owl_neural/S/Graph/Neuron/Activation/index.html deleted file mode 100644 index 77d4791f1..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Activation/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Activation (owl.Owl_neural.S.Graph.Neuron.Activation)

Module Neuron.Activation

type typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Activation.typ =
  1. | Elu
  2. | Relu
  3. | Sigmoid
  4. | HardSigmoid
  5. | Softmax of int
  6. | Softplus
  7. | Softsign
  8. | Tanh
  9. | Relu6
  10. | LeakyRelu of float
  11. | TRelu of float
  12. | Custom of Optimise.Algodiff.t -> Optimise.Algodiff.t
  13. | None
type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Activation.neuron_typ = {
  1. mutable activation : typ;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val run_activation : Optimise.Algodiff.t -> typ -> Optimise.Algodiff.t
val copy : neuron_typ -> neuron_typ
val activation_to_string : typ -> string
val to_string : neuron_typ -> string
val to_name : unit -> string
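The Custom constructor accepts any Algodiff function of type t -> t, so user-defined activations can be plugged into the ?act_typ argument of the layer constructors. A hedged sketch, assuming Owl.Algodiff.S is the same Algodiff instance this network type uses (as in the standard Owl examples); swish is just an illustrative name:

module AD = Owl.Algodiff.S

(* sketch: a swish-like activation built from the Custom constructor *)
let swish = Activation.Custom (fun x -> AD.Maths.(x * sigmoid x))

(* it can then be passed wherever ?act_typ is accepted, e.g. *)
let layer x = fully_connected ~act_typ:swish 256 x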
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Add/index.html b/owl/Owl_neural/S/Graph/Neuron/Add/index.html deleted file mode 100644 index 2353ea275..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Add/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Add (owl.Owl_neural.S.Graph.Neuron.Add)

Module Neuron.Add

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Add.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/AlphaDropout/index.html b/owl/Owl_neural/S/Graph/Neuron/AlphaDropout/index.html deleted file mode 100644 index a2fa97dbf..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/AlphaDropout/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -AlphaDropout (owl.Owl_neural.S.Graph.Neuron.AlphaDropout)

Module Neuron.AlphaDropout

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.AlphaDropout.neuron_typ = {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : float -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Average/index.html b/owl/Owl_neural/S/Graph/Neuron/Average/index.html deleted file mode 100644 index 4b770e595..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Average/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Average (owl.Owl_neural.S.Graph.Neuron.Average)

Module Neuron.Average

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Average.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/AvgPool1D/index.html b/owl/Owl_neural/S/Graph/Neuron/AvgPool1D/index.html deleted file mode 100644 index 13c8d5a02..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/AvgPool1D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -AvgPool1D (owl.Owl_neural.S.Graph.Neuron.AvgPool1D)

Module Neuron.AvgPool1D

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.AvgPool1D.neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : Owl_types.padding -> int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/AvgPool2D/index.html b/owl/Owl_neural/S/Graph/Neuron/AvgPool2D/index.html deleted file mode 100644 index 5e6f20835..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/AvgPool2D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -AvgPool2D (owl.Owl_neural.S.Graph.Neuron.AvgPool2D)

Module Neuron.AvgPool2D

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.AvgPool2D.neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : Owl_types.padding -> int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Concatenate/index.html b/owl/Owl_neural/S/Graph/Neuron/Concatenate/index.html deleted file mode 100644 index 9d72ae0a1..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Concatenate/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Concatenate (owl.Owl_neural.S.Graph.Neuron.Concatenate)

Module Neuron.Concatenate

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Concatenate.neuron_typ = {
  1. mutable axis : int;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : int -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Conv1D/index.html b/owl/Owl_neural/S/Graph/Neuron/Conv1D/index.html deleted file mode 100644 index 5304d78bf..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Conv1D/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -Conv1D (owl.Owl_neural.S.Graph.Neuron.Conv1D)

Module Neuron.Conv1D

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Conv1D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Conv2D/index.html b/owl/Owl_neural/S/Graph/Neuron/Conv2D/index.html deleted file mode 100644 index 5f24d50d0..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Conv2D/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -Conv2D (owl.Owl_neural.S.Graph.Neuron.Conv2D)

Module Neuron.Conv2D

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Conv2D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
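These neuron-level functions are normally driven by the surrounding Graph module, but they can also be exercised directly, which makes the create/connect/init/mkpar life cycle visible. A rough sketch (kernel and input shapes are illustrative; SAME comes from Owl_types):

let () =
  (* create a 3x3, 1 -> 32 channel Conv2D neuron with stride 1 *)
  let n = Conv2D.create Owl_types.SAME [| 3; 3; 1; 32 |] [| 1; 1 |] Init.Tanh in
  Conv2D.connect [| 28; 28; 1 |] n;   (* infer in_shape/out_shape from the input shape *)
  Conv2D.init n;                      (* allocate w and b according to init_typ *)
  let _w_and_b = Conv2D.mkpar n in    (* [| w; b |] as Optimise.Algodiff.t values *)
  print_endline (Conv2D.to_string n)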
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Conv3D/index.html b/owl/Owl_neural/S/Graph/Neuron/Conv3D/index.html deleted file mode 100644 index 71d63d178..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Conv3D/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -Conv3D (owl.Owl_neural.S.Graph.Neuron.Conv3D)

Module Neuron.Conv3D

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Conv3D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/DilatedConv1D/index.html b/owl/Owl_neural/S/Graph/Neuron/DilatedConv1D/index.html deleted file mode 100644 index 861d7ce75..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/DilatedConv1D/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -DilatedConv1D (owl.Owl_neural.S.Graph.Neuron.DilatedConv1D)

Module Neuron.DilatedConv1D

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.DilatedConv1D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/DilatedConv2D/index.html b/owl/Owl_neural/S/Graph/Neuron/DilatedConv2D/index.html deleted file mode 100644 index 2a69646d4..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/DilatedConv2D/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -DilatedConv2D (owl.Owl_neural.S.Graph.Neuron.DilatedConv2D)

Module Neuron.DilatedConv2D

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.DilatedConv2D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/DilatedConv3D/index.html b/owl/Owl_neural/S/Graph/Neuron/DilatedConv3D/index.html deleted file mode 100644 index 87f5fcf5e..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/DilatedConv3D/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -DilatedConv3D (owl.Owl_neural.S.Graph.Neuron.DilatedConv3D)

Module Neuron.DilatedConv3D

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.DilatedConv3D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable rate : int array;
  6. mutable padding : Owl_types.padding;
  7. mutable init_typ : Init.typ;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Dot/index.html b/owl/Owl_neural/S/Graph/Neuron/Dot/index.html deleted file mode 100644 index 12360114c..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Dot/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Dot (owl.Owl_neural.S.Graph.Neuron.Dot)

Module Neuron.Dot

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Dot.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Dropout/index.html b/owl/Owl_neural/S/Graph/Neuron/Dropout/index.html deleted file mode 100644 index 6e2f69fea..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Dropout/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Dropout (owl.Owl_neural.S.Graph.Neuron.Dropout)

Module Neuron.Dropout

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Dropout.neuron_typ = {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : float -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Embedding/index.html b/owl/Owl_neural/S/Graph/Neuron/Embedding/index.html deleted file mode 100644 index 38087b292..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Embedding/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Embedding (owl.Owl_neural.S.Graph.Neuron.Embedding)

Module Neuron.Embedding

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Embedding.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable init_typ : Init.typ;
  3. mutable in_dim : int;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : ?inputs:int -> int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Flatten/index.html b/owl/Owl_neural/S/Graph/Neuron/Flatten/index.html deleted file mode 100644 index 75e4fbe20..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Flatten/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Flatten (owl.Owl_neural.S.Graph.Neuron.Flatten)

Module Neuron.Flatten

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Flatten.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/FullyConnected/index.html b/owl/Owl_neural/S/Graph/Neuron/FullyConnected/index.html deleted file mode 100644 index b67ba33e3..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/FullyConnected/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -FullyConnected (owl.Owl_neural.S.Graph.Neuron.FullyConnected)

Module Neuron.FullyConnected

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.FullyConnected.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable init_typ : Init.typ;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
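mkpar and update are the two halves of the optimiser's contract with a neuron: read the current parameters, write the adjusted ones back. A hedged sketch of that round trip on a stand-alone FullyConnected neuron (sizes are illustrative):

let () =
  let n = FullyConnected.create ~inputs:784 128 Init.GlorotUniform in
  FullyConnected.init n;
  let ps = FullyConnected.mkpar n in   (* [| w; b |] *)
  (* an optimiser would transform ps here; writing the result back is simply: *)
  FullyConnected.update n ps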
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/GRU/index.html b/owl/Owl_neural/S/Graph/Neuron/GRU/index.html deleted file mode 100644 index 862bce24b..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/GRU/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GRU (owl.Owl_neural.S.Graph.Neuron.GRU)

Module Neuron.GRU

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.GRU.neuron_typ = {
  1. mutable wxz : Optimise.Algodiff.t;
  2. mutable whz : Optimise.Algodiff.t;
  3. mutable wxr : Optimise.Algodiff.t;
  4. mutable whr : Optimise.Algodiff.t;
  5. mutable wxh : Optimise.Algodiff.t;
  6. mutable whh : Optimise.Algodiff.t;
  7. mutable bz : Optimise.Algodiff.t;
  8. mutable br : Optimise.Algodiff.t;
  9. mutable bh : Optimise.Algodiff.t;
  10. mutable h : Optimise.Algodiff.t;
  11. mutable init_typ : Init.typ;
  12. mutable in_shape : int array;
  13. mutable out_shape : int array;
}
val create : ?time_steps:int -> ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/GaussianDropout/index.html b/owl/Owl_neural/S/Graph/Neuron/GaussianDropout/index.html deleted file mode 100644 index f8e902491..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/GaussianDropout/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GaussianDropout (owl.Owl_neural.S.Graph.Neuron.GaussianDropout)

Module Neuron.GaussianDropout

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.GaussianDropout.neuron_typ = {
  1. mutable rate : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : float -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/GaussianNoise/index.html b/owl/Owl_neural/S/Graph/Neuron/GaussianNoise/index.html deleted file mode 100644 index 925d924fa..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/GaussianNoise/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GaussianNoise (owl.Owl_neural.S.Graph.Neuron.GaussianNoise)

Module Neuron.GaussianNoise

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.GaussianNoise.neuron_typ = {
  1. mutable sigma : float;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : float -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/GlobalAvgPool1D/index.html b/owl/Owl_neural/S/Graph/Neuron/GlobalAvgPool1D/index.html deleted file mode 100644 index fe637b4e7..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/GlobalAvgPool1D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GlobalAvgPool1D (owl.Owl_neural.S.Graph.Neuron.GlobalAvgPool1D)

Module Neuron.GlobalAvgPool1D

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.GlobalAvgPool1D.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/GlobalAvgPool2D/index.html b/owl/Owl_neural/S/Graph/Neuron/GlobalAvgPool2D/index.html deleted file mode 100644 index f722aaa69..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/GlobalAvgPool2D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GlobalAvgPool2D (owl.Owl_neural.S.Graph.Neuron.GlobalAvgPool2D)

Module Neuron.GlobalAvgPool2D

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.GlobalAvgPool2D.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/GlobalMaxPool1D/index.html b/owl/Owl_neural/S/Graph/Neuron/GlobalMaxPool1D/index.html deleted file mode 100644 index b28b18802..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/GlobalMaxPool1D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GlobalMaxPool1D (owl.Owl_neural.S.Graph.Neuron.GlobalMaxPool1D)

Module Neuron.GlobalMaxPool1D

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.GlobalMaxPool1D.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/GlobalMaxPool2D/index.html b/owl/Owl_neural/S/Graph/Neuron/GlobalMaxPool2D/index.html deleted file mode 100644 index 665d6aedb..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/GlobalMaxPool2D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -GlobalMaxPool2D (owl.Owl_neural.S.Graph.Neuron.GlobalMaxPool2D)

Module Neuron.GlobalMaxPool2D

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.GlobalMaxPool2D.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Init/index.html b/owl/Owl_neural/S/Graph/Neuron/Init/index.html deleted file mode 100644 index ce34efae5..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Init/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Init (owl.Owl_neural.S.Graph.Neuron.Init)

Module Neuron.Init

type typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Init.typ =
  1. | Uniform of float * float
  2. | Gaussian of float * float
  3. | Standard
  4. | Tanh
  5. | GlorotNormal
  6. | GlorotUniform
  7. | LecunNormal
  8. | HeNormal
  9. | Custom of int array -> Optimise.Algodiff.t
val calc_fans : int array -> float * float
val run : typ -> int array -> Optimise.Algodiff.t -> Optimise.Algodiff.t
val to_string : typ -> string
val to_name : unit -> string
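Each variant selects how run fills a freshly allocated weight tensor, with calc_fans supplying the (fan_in, fan_out) pair used by the Glorot/He schemes. In practice an Init.typ is usually handed to a layer constructor rather than run directly; a sketch using constructors shown earlier in the Graph module (shapes are illustrative):

(* sketch: choosing weight initialisers at the Graph level *)
let dense_gauss x = fully_connected ~init_typ:(Init.Gaussian (0., 0.01)) 256 x
let conv_he x = conv3d ~init_typ:Init.HeNormal [| 3; 3; 3; 1; 16 |] [| 1; 1; 1 |] x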
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Input/index.html b/owl/Owl_neural/S/Graph/Neuron/Input/index.html deleted file mode 100644 index d3c300540..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Input/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Input (owl.Owl_neural.S.Graph.Neuron.Input)

Module Neuron.Input

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Input.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : int array -> neuron_typ
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/LSTM/index.html b/owl/Owl_neural/S/Graph/Neuron/LSTM/index.html deleted file mode 100644 index c209ce20f..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/LSTM/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -LSTM (owl.Owl_neural.S.Graph.Neuron.LSTM)

Module Neuron.LSTM

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.LSTM.neuron_typ = {
  1. mutable wxi : Optimise.Algodiff.t;
  2. mutable whi : Optimise.Algodiff.t;
  3. mutable wxc : Optimise.Algodiff.t;
  4. mutable whc : Optimise.Algodiff.t;
  5. mutable wxf : Optimise.Algodiff.t;
  6. mutable whf : Optimise.Algodiff.t;
  7. mutable wxo : Optimise.Algodiff.t;
  8. mutable who : Optimise.Algodiff.t;
  9. mutable bi : Optimise.Algodiff.t;
  10. mutable bc : Optimise.Algodiff.t;
  11. mutable bf : Optimise.Algodiff.t;
  12. mutable bo : Optimise.Algodiff.t;
  13. mutable c : Optimise.Algodiff.t;
  14. mutable h : Optimise.Algodiff.t;
  15. mutable init_typ : Init.typ;
  16. mutable in_shape : int array;
  17. mutable out_shape : int array;
}
val create : ?time_steps:int -> ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Lambda/index.html b/owl/Owl_neural/S/Graph/Neuron/Lambda/index.html deleted file mode 100644 index 6daf56337..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Lambda/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Lambda (owl.Owl_neural.S.Graph.Neuron.Lambda)

Module Neuron.Lambda

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Lambda.neuron_typ = {
  1. mutable lambda : Optimise.Algodiff.t -> Optimise.Algodiff.t;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : ?out_shape:int array -> (Optimise.Algodiff.t -> Optimise.Algodiff.t) -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
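The graph-level lambda wrapper (signature in the Graph module above) builds this neuron around an arbitrary Algodiff function; when ?out_shape is omitted the output shape is taken to match the input. A hedged sketch, assuming Owl.Algodiff.S is the Algodiff instance in use:

open Owl.Algodiff.S

(* sketch: a lambda layer body that rescales its input by 1/256 *)
let rescale x = Maths.(x / pack_flt 256.)

(* inside a graph: ... |> lambda rescale |> ... *)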
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/LambdaArray/index.html b/owl/Owl_neural/S/Graph/Neuron/LambdaArray/index.html deleted file mode 100644 index 8a758389c..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/LambdaArray/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -LambdaArray (owl.Owl_neural.S.Graph.Neuron.LambdaArray)

Module Neuron.LambdaArray

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.LambdaArray.neuron_typ = {
  1. mutable lambda : Optimise.Algodiff.t array -> Optimise.Algodiff.t;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : int array -> (Optimise.Algodiff.t array -> Optimise.Algodiff.t) -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Linear/index.html b/owl/Owl_neural/S/Graph/Neuron/Linear/index.html deleted file mode 100644 index 8fcaf3f06..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Linear/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Linear (owl.Owl_neural.S.Graph.Neuron.Linear)

Module Neuron.Linear

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Linear.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable init_typ : Init.typ;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/LinearNoBias/index.html b/owl/Owl_neural/S/Graph/Neuron/LinearNoBias/index.html deleted file mode 100644 index 764d08e63..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/LinearNoBias/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -LinearNoBias (owl.Owl_neural.S.Graph.Neuron.LinearNoBias)

Module Neuron.LinearNoBias

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.LinearNoBias.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable init_typ : Init.typ;
  3. mutable in_shape : int array;
  4. mutable out_shape : int array;
}
val create : ?inputs:int -> int -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Masking/index.html b/owl/Owl_neural/S/Graph/Neuron/Masking/index.html deleted file mode 100644 index 240e34464..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Masking/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Masking (owl.Owl_neural.S.Graph.Neuron.Masking)

Module Neuron.Masking

\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Max/index.html b/owl/Owl_neural/S/Graph/Neuron/Max/index.html deleted file mode 100644 index f87afdaee..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Max/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Max (owl.Owl_neural.S.Graph.Neuron.Max)

Module Neuron.Max

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Max.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/MaxPool1D/index.html b/owl/Owl_neural/S/Graph/Neuron/MaxPool1D/index.html deleted file mode 100644 index e0c426e2e..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/MaxPool1D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -MaxPool1D (owl.Owl_neural.S.Graph.Neuron.MaxPool1D)

Module Neuron.MaxPool1D

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.MaxPool1D.neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : Owl_types.padding -> int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/MaxPool2D/index.html b/owl/Owl_neural/S/Graph/Neuron/MaxPool2D/index.html deleted file mode 100644 index 8174f677d..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/MaxPool2D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -MaxPool2D (owl.Owl_neural.S.Graph.Neuron.MaxPool2D)

Module Neuron.MaxPool2D

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.MaxPool2D.neuron_typ = {
  1. mutable padding : Owl_types.padding;
  2. mutable kernel : int array;
  3. mutable stride : int array;
  4. mutable in_shape : int array;
  5. mutable out_shape : int array;
}
val create : Owl_types.padding -> int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Mul/index.html b/owl/Owl_neural/S/Graph/Neuron/Mul/index.html deleted file mode 100644 index a32cfe4c5..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Mul/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Mul (owl.Owl_neural.S.Graph.Neuron.Mul)

Module Neuron.Mul

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Mul.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : unit -> neuron_typ
val connect : int array array -> neuron_typ -> unit
val copy : 'a -> neuron_typ
val run : Optimise.Algodiff.t array -> 'a -> Optimise.Algodiff.t
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Normalisation/index.html b/owl/Owl_neural/S/Graph/Neuron/Normalisation/index.html deleted file mode 100644 index 8398d273f..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Normalisation/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -Normalisation (owl.Owl_neural.S.Graph.Neuron.Normalisation)

Module Neuron.Normalisation

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Normalisation.neuron_typ = {
  1. mutable axis : int;
  2. mutable beta : Optimise.Algodiff.t;
  3. mutable gamma : Optimise.Algodiff.t;
  4. mutable mu : Optimise.Algodiff.t;
  5. mutable var : Optimise.Algodiff.t;
  6. mutable decay : Optimise.Algodiff.t;
  7. mutable training : bool;
  8. mutable in_shape : int array;
  9. mutable out_shape : int array;
}
val create : ?training:bool -> ?decay:float -> ?mu:Optimise.Algodiff.A.arr -> ?var:Optimise.Algodiff.A.arr -> int -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val load_weights : neuron_typ -> Optimise.Algodiff.t array -> unit
val save_weights : neuron_typ -> Optimise.Algodiff.t array
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
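The graph-level normalisation wrapper (signature shown in the Graph module above) constructs this neuron; mu and var hold the running statistics and decay controls how quickly they are updated. A brief sketch, assuming the usual Neural.S.Graph opens:

(* sketch: batch normalisation between two layers of a Graph network *)
let block x =
  x
  |> fully_connected ~act_typ:Activation.Relu 256
  |> normalisation ~training:true ~decay:0.9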
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/A/Linalg/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/A/Linalg/index.html deleted file mode 100644 index ee2dad58e..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl.Owl_neural.S.Graph.Neuron.Optimise.Algodiff.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
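These routines operate on the plain arr type underneath Algodiff (the single-precision dense ndarray for the S instance). A hedged sketch of linsolve, assuming A abbreviates this Optimise.Algodiff.A module:

let () =
  let a = A.uniform [| 4; 4 |] in
  let b = A.uniform [| 4; 1 |] in
  let x = A.Linalg.linsolve a b in   (* solves a * x = b *)
  A.print x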
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/A/Mat/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/A/Mat/index.html deleted file mode 100644 index a6f50e5af..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_neural.S.Graph.Neuron.Optimise.Algodiff.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/A/Scalar/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/A/Scalar/index.html deleted file mode 100644 index 2f9682b19..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl.Owl_neural.S.Graph.Neuron.Optimise.Algodiff.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/A/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/A/index.html deleted file mode 100644 index 0d013b37f..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/A/index.html +++ /dev/null @@ -1,160 +0,0 @@ - -A (owl.Owl_neural.S.Graph.Neuron.Optimise.Algodiff.A)

Module Algodiff.A

val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
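At this level conv2d works directly on ndarrays: the input is laid out as [|batch; width; height; in_channels|], the kernel as [|kw; kh; in_channels; out_channels|], and the final argument is the stride. A sketch, assuming A abbreviates this module:

let () =
  let x = A.uniform [| 10; 28; 28; 1 |] in    (* batch of 10 28x28x1 images *)
  let k = A.uniform [| 3; 3; 1; 16 |] in      (* 3x3 kernel, 1 -> 16 channels *)
  let y = A.conv2d ~padding:Owl_types_common.SAME x k [| 1; 1 |] in
  Array.iter (Printf.printf "%d ") (A.shape y)   (* prints: 10 28 28 16 *)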
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Arr/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Arr/index.html deleted file mode 100644 index b62894ae4..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Arr/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Arr (owl.Owl_neural.S.Graph.Neuron.Optimise.Algodiff.Arr)

Module Algodiff.Arr

val empty : int array -> t
val zeros : int array -> t
val ones : int array -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int array -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int array -> t
val shape : t -> int array
val numel : t -> int
val reset : t -> unit
val reshape : t -> int array -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Builder/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Builder/index.html deleted file mode 100644 index 6e23516fe..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Builder/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Builder (owl.Owl_neural.S.Graph.Neuron.Optimise.Algodiff.Builder)

Module Algodiff.Builder

module type Siso = sig ... end
val build_siso : (module Siso) -> t -> t
module type Sipo = sig ... end
val build_sipo : (module Sipo) -> t -> t * t
module type Sito = sig ... end
val build_sito : (module Sito) -> t -> t * t * t
module type Siao = sig ... end
val build_siao : (module Siao) -> t -> t array
module type Piso = sig ... end
val build_piso : (module Piso) -> t -> t -> t
module type Aiso = sig ... end
val build_aiso : (module Aiso) -> t array -> t
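build_siso turns a single-input single-output specification (the Siso module type documented below) into a differentiable function, and the other builders do the same for their respective arities. A hedged sketch of a cube operator, assuming this Algodiff module (its Maths, pack_elt/pack_arr/pack_flt and Builder) is in scope; the names Cube and cube are illustrative:

(* sketch: x^3 as a custom differentiable op via build_siso *)
module Cube = struct
  let label = "cube"
  let ff_f a = pack_elt A.Scalar.(mul a (mul a a))       (* scalar forward pass *)
  let ff_arr a = pack_arr A.(mul a (mul a a))            (* ndarray forward pass *)
  let df _cp ap at = Maths.(pack_flt 3. * sqr ap * at)   (* forward-mode tangent: 3 a^2 * at *)
  let dr a _cp ca = Maths.(!ca * pack_flt 3. * sqr a)    (* reverse-mode adjoint: 3 a^2 * !ca *)
end

let cube = Builder.build_siso (module Cube : Builder.Siso)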
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Aiso/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Aiso/index.html deleted file mode 100644 index 757a730f2..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl.Owl_neural.S.Graph.Neuron.Optimise.Algodiff.Builder.Aiso)

Module type Builder.Aiso

val label : string
val ff : t array -> t
val df : int list -> t -> t array -> t array -> t
val dr : int list -> t array -> t -> t Stdlib.ref -> t list
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Piso/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Piso/index.html deleted file mode 100644 index 97941ff89..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl.Owl_neural.S.Graph.Neuron.Optimise.Algodiff.Builder.Piso)

Module type Builder.Piso

val label : string
val ff_aa : A.elt -> A.elt -> t
val ff_ab : A.elt -> A.arr -> t
val ff_ba : A.arr -> A.elt -> t
val ff_bb : A.arr -> A.arr -> t
val df_da : t -> t -> t -> t -> t
val df_db : t -> t -> t -> t -> t
val df_dab : t -> t -> t -> t -> t -> t
val dr_ab : t -> t -> t -> t Stdlib.ref -> t * t
val dr_a : t -> t -> t -> t Stdlib.ref -> t
val dr_b : t -> t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siao/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siao/index.html deleted file mode 100644 index 76b3234e1..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siao/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siao (owl.Owl_neural.S.Graph.Neuron.Optimise.Algodiff.Builder.Siao)

Module type Builder.Siao

val label : string
val ff_f : A.elt -> t array
val ff_arr : A.arr -> t array
val df : t array -> t -> t -> t array
val dr : t -> t -> t Stdlib.ref array -> t Stdlib.ref array -> t
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sipo/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sipo/index.html deleted file mode 100644 index 82f457289..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl.Owl_neural.S.Graph.Neuron.Optimise.Algodiff.Builder.Sipo)

Module type Builder.Sipo

val label : string
val ff_f : A.elt -> t * t
val ff_arr : A.arr -> t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siso/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siso/index.html deleted file mode 100644 index 8d772a393..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl.Owl_neural.S.Graph.Neuron.Optimise.Algodiff.Builder.Siso)

Module type Builder.Siso

val label : string
val ff_f : A.elt -> t
val ff_arr : A.arr -> t
val df : t -> t -> t -> t
val dr : t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sito/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sito/index.html deleted file mode 100644 index 4d16931aa..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Builder/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl.Owl_neural.S.Graph.Neuron.Optimise.Algodiff.Builder.Sito)

Module type Builder.Sito

val label : string
val ff_f : A.elt -> t * t * t
val ff_arr : A.arr -> t * t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Linalg/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Linalg/index.html deleted file mode 100644 index 426fda061..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl.Owl_neural.S.Graph.Neuron.Optimise.Algodiff.Linalg)

Module Algodiff.Linalg

val inv : t -> t
val logdet : t -> t
val chol : ?upper:bool -> t -> t
val qr : t -> t * t
val lq : t -> t * t
val svd : ?thin:bool -> t -> t * t * t
val sylvester : t -> t -> t -> t
val lyapunov : t -> t -> t
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> t -> t -> t
val (/@) : t -> t -> t
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> t -> t -> t
val care : ?diag_r:bool -> t -> t -> t -> t -> t
val dare : ?diag_r:bool -> t -> t -> t -> t -> t
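Everything in Linalg is a differentiable version of the corresponding dense routine, so it can appear inside a function that is later handed to grad or diff. A minimal sketch, assuming the enclosing Algodiff instance is bound to AD (e.g. Owl_algodiff.S); the 1e-6 jitter and the 5 x 5 size are arbitrary choices for the example.

module AD = Owl_algodiff.S

(* f(X) = log det (X^T X + eps * I), a scalar-valued function of a matrix *)
let f x =
  let open AD in
  let n = (shape x).(1) in
  Linalg.logdet Maths.(transpose x *@ x + pack_flt 1e-6 * Mat.eye n)

(* the gradient has the same shape as the input matrix *)
let gx = AD.(grad f (Mat.gaussian 5 5))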
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Mat/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Mat/index.html deleted file mode 100644 index 949b209d4..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_neural.S.Graph.Neuron.Optimise.Algodiff.Mat)

Module Algodiff.Mat

val empty : int -> int -> t
val zeros : int -> int -> t
val eye : int -> t
val ones : int -> int -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int -> int -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int -> int -> t
val shape : t -> int * int
val numel : t -> int
val row_num : t -> int
val col_num : t -> int
val reset : t -> unit
val reshape : int -> int -> t -> t
val get : t -> int -> int -> t
val set : t -> int -> int -> t -> t
val row : t -> int -> t
val mean : t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
val map_by_row : (t -> t) -> t -> t
val of_arrays : A.elt array array -> t
val init_2d : int -> int -> (int -> int -> t) -> t
val print : t -> unit
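Mat builds and inspects AD matrices directly (its values are t, not raw A.arr), so anything it creates can be fed straight into Maths and differentiated. A tiny sketch, assuming the enclosing Algodiff module is open:

let w = Mat.gaussian 3 3                                         (* 3 x 3 random AD matrix *)
let x = Mat.init_2d 3 1 (fun i _ -> pack_flt (float_of_int i))   (* 3 x 1 column *)
let y = Maths.(w *@ x)                                           (* still an AD value *)
let dims = Mat.shape y                                           (* (3, 1) *)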
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Maths/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Maths/index.html deleted file mode 100644 index 886f5186f..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/Maths/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Maths (owl.Owl_neural.S.Graph.Neuron.Optimise.Algodiff.Maths)

Module Algodiff.Maths

val (+) : t -> t -> t
val (-) : t -> t -> t
val (*) : t -> t -> t
val (/) : t -> t -> t
val (*@) : t -> t -> t
val (**) : t -> t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val kron : t -> t -> t
val dot : t -> t -> t
val pow : t -> t -> t
val atan2 : t -> t -> t
val min2 : t -> t -> t
val max2 : t -> t -> t
val cross_entropy : t -> t -> t
val inv : t -> t
val neg : t -> t
val abs : t -> t
val signum : t -> t
val floor : t -> t
val ceil : t -> t
val round : t -> t
val sqr : t -> t
val sqrt : t -> t
val log : t -> t
val log2 : t -> t
val log10 : t -> t
val exp : t -> t
val sin : t -> t
val cos : t -> t
val tan : t -> t
val sinh : t -> t
val cosh : t -> t
val tanh : t -> t
val asin : t -> t
val acos : t -> t
val atan : t -> t
val asinh : t -> t
val acosh : t -> t
val atanh : t -> t
val sum' : t -> t
val log_sum_exp' : t -> t
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> t -> t
val sum : ?axis:int -> ?keep_dims:bool -> t -> t
val sum_reduce : ?axis:int array -> t -> t
val mean : t -> t
val transpose : ?axis:int array -> t -> t
val swap : int -> int -> t -> t
val l1norm' : t -> t
val l2norm' : t -> t
val l2norm_sqr' : t -> t
val sigmoid : t -> t
val relu : t -> t
val dawsn : t -> t
val softplus : t -> t
val softsign : t -> t
val softmax : ?axis:int -> t -> t
val reshape : t -> int array -> t
val flatten : t -> t
val get_item : t -> int -> int -> t
val get_row : t -> int -> t
val concat : axis:int -> t -> t -> t
val split : axis:int -> int array -> t -> t array
val of_arrays : t array array -> t
val to_arrays : t -> t array array
val concatenate : axis:int -> t array -> t
val stack : axis:int -> t array -> t
val get_slice : int list list -> t -> t
val set_slice : int list list -> t -> t -> t
val get_fancy : Owl_types.index list -> t -> t
val set_fancy : Owl_types.index list -> t -> t -> t
val diag : ?k:int -> t -> t
val diagm : ?k:int -> t -> t
val trace : t -> t
val triu : ?k:int -> t -> t
val tril : ?k:int -> t -> t
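Maths mirrors the familiar scalar/ndarray math API over AD values, so one expression serves both F (scalar) and Arr (ndarray) inputs while recording what is needed for differentiation. A short sketch, assuming the enclosing Algodiff module is open:

let lse x = Maths.(log (sum' (exp x)))      (* naive log-sum-exp *)
let silu x = Maths.(x * sigmoid x)          (* sigmoid-gated unit *)

let _ = lse (Mat.uniform 1 10)              (* Arr input *)
let _ = silu (pack_flt 1.5)                 (* F input, same code path *)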
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/NN/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/NN/index.html deleted file mode 100644 index 768259366..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/NN/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -NN (owl.Owl_neural.S.Graph.Neuron.Optimise.Algodiff.NN)

Module Algodiff.NN

val dropout : ?rate:float -> t -> t
val conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val dilated_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val dilated_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val dilated_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val transpose_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val transpose_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val transpose_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val max_pool1d : Owl_types.padding -> t -> int array -> int array -> t
val max_pool2d : Owl_types.padding -> t -> int array -> int array -> t
val max_pool3d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool1d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool2d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool3d : Owl_types.padding -> t -> int array -> int array -> t
val upsampling2d : t -> int array -> t
val pad : ?v:A.elt -> int list list -> t -> t
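NN exposes the neural-network primitives (convolutions, pooling, dropout, padding) over AD values. The sketch below runs one convolution plus max-pooling step; it assumes the enclosing Algodiff module is open, that the embedded A module has the usual uniform/gaussian constructors, that Owl_types.padding has its customary SAME/VALID constructors, and that inputs follow Owl's usual [|batch; h; w; in_channels|] layout with [|kh; kw; in_ch; out_ch|] kernels (a convention taken from Owl at large, not stated on this page).

let x = Arr (A.uniform [| 8; 28; 28; 1 |])    (* batch of 8 grey-scale 28x28 images *)
let w = Arr (A.gaussian [| 5; 5; 1; 32 |])    (* 5x5 kernels, 1 -> 32 channels *)

let y = NN.conv2d ~padding:Owl_types.SAME x w [| 1; 1 |]       (* stride 1x1 *)
let z = NN.max_pool2d Owl_types.SAME y [| 2; 2 |] [| 2; 2 |]   (* 2x2 window, stride 2 *)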
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/index.html deleted file mode 100644 index 7f3c5d962..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Algodiff/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Algodiff (owl.Owl_neural.S.Graph.Neuron.Optimise.Algodiff)

Module Optimise.Algodiff

module A : sig ... end
type t = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Optimise.Algodiff.t =
  1. | F of A.elt
  2. | Arr of A.arr
  3. | DF of t * t * int
  4. | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
val tag : unit -> int
val primal : t -> t
val primal' : t -> t
val zero : t -> t
val reset_zero : t -> t
val tangent : t -> t
val adjref : t -> t Stdlib.ref
val adjval : t -> t
val shape : t -> int array
val is_float : t -> bool
val is_arr : t -> bool
val row_num : t -> int
val col_num : t -> int
val numel : t -> int
val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t
val clip_by_l2norm : A.elt -> t -> t
val copy_primal' : t -> t
val tile : t -> int array -> t
val repeat : t -> int array -> t
val pack_elt : A.elt -> t
val unpack_elt : t -> A.elt
val pack_flt : float -> t
val _f : float -> t
val unpack_flt : t -> float
val pack_arr : A.arr -> t
val unpack_arr : t -> A.arr
val deep_info : t -> string
val type_info : t -> string
val error_binop : string -> t -> t -> 'a
val error_uniop : string -> t -> 'a
val make_forward : t -> t -> int -> t
val make_reverse : t -> int -> t
val reverse_prop : t -> t -> unit
val diff : (t -> t) -> t -> t
val diff' : (t -> t) -> t -> t * t
val grad : (t -> t) -> t -> t
val grad' : (t -> t) -> t -> t * t
val jacobian : (t -> t) -> t -> t
val jacobian' : (t -> t) -> t -> t * t
val jacobianv : (t -> t) -> t -> t -> t
val jacobianv' : (t -> t) -> t -> t -> t * t
val jacobianTv : (t -> t) -> t -> t -> t
val jacobianTv' : (t -> t) -> t -> t -> t * t
val hessian : (t -> t) -> t -> t
val hessian' : (t -> t) -> t -> t * t
val hessianv : (t -> t) -> t -> t -> t
val hessianv' : (t -> t) -> t -> t -> t * t
val laplacian : (t -> t) -> t -> t
val laplacian' : (t -> t) -> t -> t * t
val gradhessian : (t -> t) -> t -> t * t
val gradhessian' : (t -> t) -> t -> t * t * t
val gradhessianv : (t -> t) -> t -> t -> t * t
val gradhessianv' : (t -> t) -> t -> t -> t * t * t
module Builder : sig ... end
module Maths : sig ... end
module Linalg : sig ... end
module NN : sig ... end
module Mat : sig ... end
module Arr : sig ... end
val to_trace : t list -> string
val to_dot : t list -> string
val pp_num : Stdlib.Format.formatter -> t -> unit
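diff, grad, jacobian, hessian and friends are the user-facing entry points; the primed variants additionally return the primal value. A short sketch, assuming the single-precision instance Owl_algodiff.S; the Hessian is taken at a 1 x n row vector, the shape these operators conventionally expect for vector-to-scalar functions.

module AD = Owl_algodiff.S
open AD

(* d/dx (x^3 + sin x), evaluated at x = 1 *)
let f x = Maths.(x * x * x + sin x)
let df_at_1 = unpack_flt (diff f (pack_flt 1.0))

(* gradient and Hessian of a vector-to-scalar function *)
let g x = Maths.(l2norm_sqr' (tanh x))
let x0 = Mat.gaussian 1 3
let gx = grad g x0       (* 1 x 3, same shape as x0 *)
let hx = hessian g x0    (* 3 x 3 *)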
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Batch/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Batch/index.html deleted file mode 100644 index 5c6035531..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Batch/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Batch (owl.Owl_neural.S.Graph.Neuron.Optimise.Batch)

Module Optimise.Batch

type typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Optimise.Batch.typ =
  1. | Full
  2. | Mini of int
  3. | Sample of int
  4. | Stochastic
val run : typ -> Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t
val batches : typ -> Algodiff.t -> int
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Checkpoint/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Checkpoint/index.html deleted file mode 100644 index 3bc242951..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Checkpoint/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Checkpoint (owl.Owl_neural.S.Graph.Neuron.Optimise.Checkpoint)

Module Optimise.Checkpoint

type state = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Optimise.Checkpoint.state = {
  1. mutable current_batch : int;
  2. mutable batches_per_epoch : int;
  3. mutable epochs : float;
  4. mutable batches : int;
  5. mutable loss : Algodiff.t array;
  6. mutable start_at : float;
  7. mutable stop : bool;
  8. mutable gs : Algodiff.t array array;
  9. mutable ps : Algodiff.t array array;
  10. mutable us : Algodiff.t array array;
  11. mutable ch : Algodiff.t array array array;
}
type typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Optimise.Checkpoint.typ =
  1. | Batch of int
  2. | Epoch of float
  3. | Custom of state -> unit
  4. | None
val init_state : int -> float -> state
val default_checkpoint_fun : (string -> 'a) -> 'a
val print_state_info : state -> unit
val print_summary : state -> unit
val run : typ -> (string -> unit) -> int -> Algodiff.t -> state -> unit
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Clipping/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Clipping/index.html deleted file mode 100644 index fd0a4c65b..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Clipping/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Clipping (owl.Owl_neural.S.Graph.Neuron.Optimise.Clipping)

Module Optimise.Clipping

type typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Optimise.Clipping.typ =
  1. | L2norm of float
  2. | Value of float * float
  3. | None
val run : typ -> Algodiff.t -> Algodiff.t
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Gradient/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Gradient/index.html deleted file mode 100644 index 43d5c5058..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Gradient/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -Gradient (owl.Owl_neural.S.Graph.Neuron.Optimise.Gradient)

Module Optimise.Gradient

type typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Optimise.Gradient.typ =
  1. | GD
  2. | CG
  3. | CD
  4. | NonlinearCG
  5. | DaiYuanCG
  6. | NewtonCG
  7. | Newton
val run : typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Learning_Rate/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Learning_Rate/index.html deleted file mode 100644 index 044e80c3a..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Learning_Rate/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Learning_Rate (owl.Owl_neural.S.Graph.Neuron.Optimise.Learning_Rate)

Module Optimise.Learning_Rate

type typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Optimise.Learning_Rate.typ =
  1. | Adagrad of float
  2. | Const of float
  3. | Decay of float * float
  4. | Exp_decay of float * float
  5. | RMSprop of float * float
  6. | Adam of float * float * float
  7. | Schedule of float array
val run : typ -> int -> Algodiff.t -> Algodiff.t array -> Algodiff.t
val default : typ -> typ
val update_ch : typ -> Algodiff.t -> Algodiff.t array -> Algodiff.t array
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Loss/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Loss/index.html deleted file mode 100644 index 0855e65b9..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Loss/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Loss (owl.Owl_neural.S.Graph.Neuron.Optimise.Loss)

Module Optimise.Loss

type typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Optimise.Loss.typ =
  1. | Hinge
  2. | L1norm
  3. | L2norm
  4. | Quadratic
  5. | Cross_entropy
  6. | Custom of Algodiff.t -> Algodiff.t -> Algodiff.t
val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
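Besides the built-in cases, Loss.Custom wraps any Algodiff.t -> Algodiff.t -> Algodiff.t function so it can be used wherever a typ is expected (for instance in Params). A sketch of a smoothed-L1 style loss, written as if inside the surrounding Optimise module so that Loss and Algodiff are in scope; the 1e-6 smoothing constant is arbitrary.

let smooth_l1 =
  Loss.Custom
    (fun y y' ->
      let open Algodiff in
      let d = Maths.(y - y') in
      Maths.(sum' (sqrt (sqr d + pack_flt 1e-6))))

(* evaluated like any built-in loss *)
let l = Loss.run smooth_l1 (Algodiff.Mat.uniform 1 5) (Algodiff.Mat.uniform 1 5)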
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Momentum/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Momentum/index.html deleted file mode 100644 index cf6d96699..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Momentum/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Momentum (owl.Owl_neural.S.Graph.Neuron.Optimise.Momentum)

Module Optimise.Momentum

type typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Optimise.Momentum.typ =
  1. | Standard of float
  2. | Nesterov of float
  3. | None
val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Params/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Params/index.html deleted file mode 100644 index e1cfcc8c4..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Params/index.html +++ /dev/null @@ -1,16 +0,0 @@ - -Params (owl.Owl_neural.S.Graph.Neuron.Optimise.Params)

Module Optimise.Params

type typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Optimise.Params.typ = {
  1. mutable epochs : float;
  2. mutable batch : Batch.typ;
  3. mutable gradient : Gradient.typ;
  4. mutable loss : Loss.typ;
  5. mutable learning_rate : Learning_Rate.typ;
  6. mutable regularisation : Regularisation.typ;
  7. mutable momentum : Momentum.typ;
  8. mutable clipping : Clipping.typ;
  9. mutable stopping : Stopping.typ;
  10. mutable checkpoint : Checkpoint.typ;
  11. mutable verbosity : bool;
}
val default : unit -> typ
val config : ?batch:Batch.typ -> ?gradient:Gradient.typ -> ?loss:Loss.typ -> ?learning_rate:Learning_Rate.typ -> ?regularisation:Regularisation.typ -> ?momentum:Momentum.typ -> ?clipping:Clipping.typ -> ?stopping:Stopping.typ -> ?checkpoint:Checkpoint.typ -> ?verbosity:bool -> float -> typ
val to_string : typ -> string
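config is the usual way to build a typ: every field has a default and the single positional float is the number of epochs. A sketch with a few common choices, written as if inside the surrounding Optimise module; all hyper-parameter values are illustrative.

let params =
  Params.config
    ~batch:(Batch.Mini 128)
    ~learning_rate:(Learning_Rate.Adam (0.001, 0.9, 0.999))
    ~loss:Loss.Cross_entropy
    ~checkpoint:(Checkpoint.Epoch 1.)
    ~verbosity:true
    10.                                   (* train for 10 epochs *)

let () = print_endline (Params.to_string params)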
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Regularisation/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Regularisation/index.html deleted file mode 100644 index 01752cf13..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Regularisation/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Regularisation (owl.Owl_neural.S.Graph.Neuron.Optimise.Regularisation)

Module Optimise.Regularisation

type typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Optimise.Regularisation.typ =
  1. | L1norm of float
  2. | L2norm of float
  3. | Elastic_net of float * float
  4. | None
val run : typ -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Stopping/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Stopping/index.html deleted file mode 100644 index 8181bb328..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Stopping/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Stopping (owl.Owl_neural.S.Graph.Neuron.Optimise.Stopping)

Module Optimise.Stopping

type typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Optimise.Stopping.typ =
  1. | Const of float
  2. | Early of int * int
  3. | None
val run : typ -> float -> bool
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/Utils/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/Utils/index.html deleted file mode 100644 index 4ce388f9f..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/Utils/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Utils (owl.Owl_neural.S.Graph.Neuron.Optimise.Utils)

Module Optimise.Utils

val sample_num : Algodiff.t -> int
val draw_samples : Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t
val get_chunk : Algodiff.t -> Algodiff.t -> int -> int -> Algodiff.t * Algodiff.t
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Optimise/index.html b/owl/Owl_neural/S/Graph/Neuron/Optimise/index.html deleted file mode 100644 index 3ff54d18a..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Optimise/index.html +++ /dev/null @@ -1,31 +0,0 @@ - -Optimise (owl.Owl_neural.S.Graph.Neuron.Optimise)

Module Neuron.Optimise

module Algodiff : sig ... end
module Utils : sig ... end
module Learning_Rate : sig ... end
module Batch : sig ... end
module Loss : sig ... end
module Gradient : sig ... end
module Momentum : sig ... end
module Regularisation : sig ... end
module Clipping : sig ... end
module Stopping : sig ... end
module Checkpoint : sig ... end
module Params : sig ... end
val minimise_weight : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Checkpoint.state * Algodiff.t
val minimise_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t * Algodiff.t array array) -> (Algodiff.t -> Algodiff.t array array * Algodiff.t array array) -> (Algodiff.t array array -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state
val minimise_fun : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Checkpoint.state * Algodiff.t
val minimise_compiled_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> (unit -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state
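minimise_fun drives the generic optimisation loop over a plain Algodiff.t -> Algodiff.t objective and returns the final checkpoint state together with the minimiser; minimise_weight and minimise_network are the same loop specialised to weight vectors and to full networks. A sketch on the 2-D Rosenbrock function, written as if this Optimise module is in scope (e.g. Owl_optimise.S); the hyper-parameters are illustrative.

let rosenbrock x =
  let open Algodiff in
  let x0 = Maths.get_item x 0 0 and x1 = Maths.get_item x 0 1 in
  let a = pack_flt 1.0 and b = pack_flt 100.0 in
  Maths.((a - x0) * (a - x0) + b * (x1 - x0 * x0) * (x1 - x0 * x0))

let params = Params.config ~learning_rate:(Learning_Rate.Adagrad 0.05) 500.
let _state, x_min = minimise_fun params rosenbrock (Algodiff.Mat.uniform 1 2)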
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Padding1D/index.html b/owl/Owl_neural/S/Graph/Neuron/Padding1D/index.html deleted file mode 100644 index 9a20b91d9..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Padding1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Padding1D (owl.Owl_neural.S.Graph.Neuron.Padding1D)

Module Neuron.Padding1D

\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Padding2D/index.html b/owl/Owl_neural/S/Graph/Neuron/Padding2D/index.html deleted file mode 100644 index f59022566..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Padding2D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Padding2D (owl.Owl_neural.S.Graph.Neuron.Padding2D)

Module Neuron.Padding2D

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Padding2D.neuron_typ = {
  1. mutable padding : int array array;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : int array array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Padding3D/index.html b/owl/Owl_neural/S/Graph/Neuron/Padding3D/index.html deleted file mode 100644 index 6d7201bee..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Padding3D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Padding3D (owl.Owl_neural.S.Graph.Neuron.Padding3D)

Module Neuron.Padding3D

\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Recurrent/index.html b/owl/Owl_neural/S/Graph/Neuron/Recurrent/index.html deleted file mode 100644 index 20962817f..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Recurrent/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -Recurrent (owl.Owl_neural.S.Graph.Neuron.Recurrent)

Module Neuron.Recurrent

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Recurrent.neuron_typ = {
  1. mutable whh : Optimise.Algodiff.t;
  2. mutable wxh : Optimise.Algodiff.t;
  3. mutable why : Optimise.Algodiff.t;
  4. mutable bh : Optimise.Algodiff.t;
  5. mutable by : Optimise.Algodiff.t;
  6. mutable h : Optimise.Algodiff.t;
  7. mutable hiddens : int;
  8. mutable act : Activation.typ;
  9. mutable init_typ : Init.typ;
  10. mutable in_shape : int array;
  11. mutable out_shape : int array;
}
val create : ?time_steps:int -> ?inputs:int -> int -> int -> Activation.typ -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Reshape/index.html b/owl/Owl_neural/S/Graph/Neuron/Reshape/index.html deleted file mode 100644 index 681b87dd6..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Reshape/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Reshape (owl.Owl_neural.S.Graph.Neuron.Reshape)

Module Neuron.Reshape

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Reshape.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
}
val create : ?inputs:int array -> int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/Slice/index.html b/owl/Owl_neural/S/Graph/Neuron/Slice/index.html deleted file mode 100644 index af480a686..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/Slice/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Slice (owl.Owl_neural.S.Graph.Neuron.Slice)

Module Neuron.Slice

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.Slice.neuron_typ = {
  1. mutable in_shape : int array;
  2. mutable out_shape : int array;
  3. mutable slice : int list list;
}
val create : int list list -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/TransposeConv1D/index.html b/owl/Owl_neural/S/Graph/Neuron/TransposeConv1D/index.html deleted file mode 100644 index c9b7a0349..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/TransposeConv1D/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -TransposeConv1D (owl.Owl_neural.S.Graph.Neuron.TransposeConv1D)

Module Neuron.TransposeConv1D

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.TransposeConv1D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/TransposeConv2D/index.html b/owl/Owl_neural/S/Graph/Neuron/TransposeConv2D/index.html deleted file mode 100644 index df1cefe39..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/TransposeConv2D/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -TransposeConv2D (owl.Owl_neural.S.Graph.Neuron.TransposeConv2D)

Module Neuron.TransposeConv2D

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.TransposeConv2D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/TransposeConv3D/index.html b/owl/Owl_neural/S/Graph/Neuron/TransposeConv3D/index.html deleted file mode 100644 index cd5022b80..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/TransposeConv3D/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -TransposeConv3D (owl.Owl_neural.S.Graph.Neuron.TransposeConv3D)

Module Neuron.TransposeConv3D

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.TransposeConv3D.neuron_typ = {
  1. mutable w : Optimise.Algodiff.t;
  2. mutable b : Optimise.Algodiff.t;
  3. mutable kernel : int array;
  4. mutable stride : int array;
  5. mutable padding : Owl_types.padding;
  6. mutable init_typ : Init.typ;
  7. mutable in_shape : int array;
  8. mutable out_shape : int array;
}
val create : ?inputs:int array -> Owl_types.padding -> int array -> int array -> Init.typ -> neuron_typ
val connect : int array -> neuron_typ -> unit
val init : neuron_typ -> unit
val reset : neuron_typ -> unit
val mktag : int -> neuron_typ -> unit
val mkpar : neuron_typ -> Optimise.Algodiff.t array
val mkpri : neuron_typ -> Optimise.Algodiff.t array
val mkadj : neuron_typ -> Optimise.Algodiff.t array
val update : neuron_typ -> Optimise.Algodiff.t array -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/UpSampling1D/index.html b/owl/Owl_neural/S/Graph/Neuron/UpSampling1D/index.html deleted file mode 100644 index 19a8db73d..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/UpSampling1D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -UpSampling1D (owl.Owl_neural.S.Graph.Neuron.UpSampling1D)

Module Neuron.UpSampling1D

\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/UpSampling2D/index.html b/owl/Owl_neural/S/Graph/Neuron/UpSampling2D/index.html deleted file mode 100644 index 3f625df19..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/UpSampling2D/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -UpSampling2D (owl.Owl_neural.S.Graph.Neuron.UpSampling2D)

Module Neuron.UpSampling2D

type neuron_typ = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.UpSampling2D.neuron_typ = {
  1. mutable size : int array;
  2. mutable in_shape : int array;
  3. mutable out_shape : int array;
}
val create : int array -> neuron_typ
val connect : int array -> neuron_typ -> unit
val copy : neuron_typ -> neuron_typ
val to_string : neuron_typ -> string
val to_name : unit -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/UpSampling3D/index.html b/owl/Owl_neural/S/Graph/Neuron/UpSampling3D/index.html deleted file mode 100644 index 7a7f083d5..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/UpSampling3D/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -UpSampling3D (owl.Owl_neural.S.Graph.Neuron.UpSampling3D)

Module Neuron.UpSampling3D

\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/Neuron/index.html b/owl/Owl_neural/S/Graph/Neuron/index.html deleted file mode 100644 index 7a49e36be..000000000 --- a/owl/Owl_neural/S/Graph/Neuron/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Neuron (owl.Owl_neural.S.Graph.Neuron)

Module Graph.Neuron

module Optimise : sig ... end
module Init : sig ... end
module Input : sig ... end
module Activation : sig ... end
module Linear : sig ... end
module LinearNoBias : sig ... end
module Recurrent : sig ... end
module LSTM : sig ... end
module GRU : sig ... end
module Conv1D : sig ... end
module Conv2D : sig ... end
module Conv3D : sig ... end
module DilatedConv1D : sig ... end
module DilatedConv2D : sig ... end
module DilatedConv3D : sig ... end
module TransposeConv1D : sig ... end
module TransposeConv2D : sig ... end
module TransposeConv3D : sig ... end
module FullyConnected : sig ... end
module MaxPool1D : sig ... end
module MaxPool2D : sig ... end
module AvgPool1D : sig ... end
module AvgPool2D : sig ... end
module GlobalMaxPool1D : sig ... end
module GlobalMaxPool2D : sig ... end
module GlobalAvgPool1D : sig ... end
module GlobalAvgPool2D : sig ... end
module UpSampling1D : sig ... end
module UpSampling2D : sig ... end
module UpSampling3D : sig ... end
module Padding1D : sig ... end
module Padding2D : sig ... end
module Padding3D : sig ... end
module Lambda : sig ... end
module LambdaArray : sig ... end
module Dropout : sig ... end
module Reshape : sig ... end
module Flatten : sig ... end
module Slice : sig ... end
module Add : sig ... end
module Mul : sig ... end
module Dot : sig ... end
module Max : sig ... end
module Average : sig ... end
module Concatenate : sig ... end
module Normalisation : sig ... end
module GaussianNoise : sig ... end
module GaussianDropout : sig ... end
module AlphaDropout : sig ... end
module Embedding : sig ... end
module Masking : sig ... end
type neuron = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).Neuron.neuron =
  1. | Input of Input.neuron_typ
  2. | Linear of Linear.neuron_typ
  3. | LinearNoBias of LinearNoBias.neuron_typ
  4. | Embedding of Embedding.neuron_typ
  5. | LSTM of LSTM.neuron_typ
  6. | GRU of GRU.neuron_typ
  7. | Recurrent of Recurrent.neuron_typ
  8. | Conv1D of Conv1D.neuron_typ
  9. | Conv2D of Conv2D.neuron_typ
  10. | Conv3D of Conv3D.neuron_typ
  11. | DilatedConv1D of DilatedConv1D.neuron_typ
  12. | DilatedConv2D of DilatedConv2D.neuron_typ
  13. | DilatedConv3D of DilatedConv3D.neuron_typ
  14. | TransposeConv1D of TransposeConv1D.neuron_typ
  15. | TransposeConv2D of TransposeConv2D.neuron_typ
  16. | TransposeConv3D of TransposeConv3D.neuron_typ
  17. | FullyConnected of FullyConnected.neuron_typ
  18. | MaxPool1D of MaxPool1D.neuron_typ
  19. | MaxPool2D of MaxPool2D.neuron_typ
  20. | AvgPool1D of AvgPool1D.neuron_typ
  21. | AvgPool2D of AvgPool2D.neuron_typ
  22. | GlobalMaxPool1D of GlobalMaxPool1D.neuron_typ
  23. | GlobalMaxPool2D of GlobalMaxPool2D.neuron_typ
  24. | GlobalAvgPool1D of GlobalAvgPool1D.neuron_typ
  25. | GlobalAvgPool2D of GlobalAvgPool2D.neuron_typ
  26. | UpSampling2D of UpSampling2D.neuron_typ
  27. | Padding2D of Padding2D.neuron_typ
  28. | Dropout of Dropout.neuron_typ
  29. | Reshape of Reshape.neuron_typ
  30. | Flatten of Flatten.neuron_typ
  31. | Slice of Slice.neuron_typ
  32. | Lambda of Lambda.neuron_typ
  33. | LambdaArray of LambdaArray.neuron_typ
  34. | Activation of Activation.neuron_typ
  35. | GaussianNoise of GaussianNoise.neuron_typ
  36. | GaussianDropout of GaussianDropout.neuron_typ
  37. | AlphaDropout of AlphaDropout.neuron_typ
  38. | Normalisation of Normalisation.neuron_typ
  39. | Add of Add.neuron_typ
  40. | Mul of Mul.neuron_typ
  41. | Dot of Dot.neuron_typ
  42. | Max of Max.neuron_typ
  43. | Average of Average.neuron_typ
  44. | Concatenate of Concatenate.neuron_typ
val get_in_out_shape : neuron -> int array * int array
val get_in_shape : neuron -> int array
val get_out_shape : neuron -> int array
val connect : int array array -> neuron -> unit
val init : neuron -> unit
val reset : neuron -> unit
val mktag : int -> neuron -> unit
val mkpar : neuron -> Optimise.Algodiff.t array
val mkpri : neuron -> Optimise.Algodiff.t array
val mkadj : neuron -> Optimise.Algodiff.t array
val update : neuron -> Optimise.Algodiff.t array -> unit
val load_weights : neuron -> Optimise.Algodiff.t array -> unit
val save_weights : neuron -> Optimise.Algodiff.t array
val copy : neuron -> neuron
val to_string : neuron -> string
val to_name : neuron -> string
\ No newline at end of file diff --git a/owl/Owl_neural/S/Graph/index.html b/owl/Owl_neural/S/Graph/index.html deleted file mode 100644 index 1139fce8f..000000000 --- a/owl/Owl_neural/S/Graph/index.html +++ /dev/null @@ -1,245 +0,0 @@ - -Graph (owl.Owl_neural.S.Graph)

Module S.Graph

module Neuron : sig ... end
type node = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).node = {
  1. mutable name : string;
  2. mutable prev : node array;
  3. mutable next : node array;
  4. mutable neuron : Neuron.neuron;
  5. mutable output : Neuron.Optimise.Algodiff.t option;
  6. mutable network : network;
  7. mutable train : bool;
}
and network = Owl_neural_generic.Make_Embedded(Owl_algodiff_primal_ops.S).network = {
  1. mutable nnid : string;
  2. mutable size : int;
  3. mutable roots : node array;
  4. mutable outputs : node array;
  5. mutable topo : node array;
}
val make_network : ?nnid:string -> int -> node array -> node array -> network
val make_node : ?name:string -> ?train:bool -> node array -> node array -> Neuron.neuron -> Neuron.Optimise.Algodiff.t option -> network -> node
val get_roots : network -> node array
val get_outputs : network -> node array
val get_node : network -> string -> node
val get_network : ?name:string -> node -> network
val outputs : ?name:string -> node array -> network
val get_network_name : network -> string
val set_network_name : network -> string -> unit
val collect_output : node array -> Neuron.Optimise.Algodiff.t array
val connect_pair : node -> node -> unit
val connect_to_parents : node array -> node -> unit
val add_node : ?act_typ:Neuron.Activation.typ -> network -> node array -> node -> node
val input_shape : network -> int array
val input_shapes : network -> int array array
val init : network -> unit
val reset : network -> unit
val mktag : int -> network -> unit
val mkpar : network -> Neuron.Optimise.Algodiff.t array array
val mkpri : network -> Neuron.Optimise.Algodiff.t array array
val mkadj : network -> Neuron.Optimise.Algodiff.t array array
val update : network -> Neuron.Optimise.Algodiff.t array array -> unit
val run_inputs : Neuron.Optimise.Algodiff.t array -> network -> Neuron.Optimise.Algodiff.t array
val forward_inputs : network -> Neuron.Optimise.Algodiff.t array -> Neuron.Optimise.Algodiff.t array * Neuron.Optimise.Algodiff.t array array
val backward : network -> Neuron.Optimise.Algodiff.t -> Neuron.Optimise.Algodiff.t array array * Neuron.Optimise.Algodiff.t array array
val copy : network -> network
val model_inputs : network -> Neuron.Optimise.Algodiff.A.arr array -> Neuron.Optimise.Algodiff.A.arr array
val input : ?name:string -> int array -> node
val inputs : ?names:string array -> int array array -> node array
val activation : ?name:string -> Neuron.Activation.typ -> node -> node
val linear : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> node -> node
val linear_nobias : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> node -> node
val embedding : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> int -> node -> node
val recurrent : ?name:string -> ?init_typ:Neuron.Init.typ -> act_typ:Neuron.Activation.typ -> int -> int -> node -> node
val lstm : ?name:string -> ?init_typ:Neuron.Init.typ -> int -> node -> node
val gru : ?name:string -> ?init_typ:Neuron.Init.typ -> int -> node -> node
val conv1d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val conv2d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val conv3d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val dilated_conv1d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> int array -> node -> node
val dilated_conv2d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> int array -> node -> node
val dilated_conv3d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> int array -> node -> node
val transpose_conv1d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val transpose_conv2d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val transpose_conv3d : ?name:string -> ?padding:Owl_types.padding -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val fully_connected : ?name:string -> ?init_typ:Neuron.Init.typ -> ?act_typ:Neuron.Activation.typ -> int -> node -> node
val max_pool1d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val max_pool2d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val avg_pool1d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val avg_pool2d : ?name:string -> ?padding:Owl_types.padding -> ?act_typ:Neuron.Activation.typ -> int array -> int array -> node -> node
val global_max_pool1d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node
val global_max_pool2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node
val global_avg_pool1d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node
val global_avg_pool2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> node -> node
val upsampling2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> int array -> node -> node
val padding2d : ?name:string -> ?act_typ:Neuron.Activation.typ -> int array array -> node -> node
val dropout : ?name:string -> float -> node -> node
val gaussian_noise : ?name:string -> float -> node -> node
val gaussian_dropout : ?name:string -> float -> node -> node
val alpha_dropout : ?name:string -> float -> node -> node
val normalisation : ?name:string -> ?axis:int -> ?training:bool -> ?decay:float -> ?mu:Neuron.Optimise.Algodiff.A.arr -> ?var:Neuron.Optimise.Algodiff.A.arr -> node -> node
val reshape : ?name:string -> int array -> node -> node
val flatten : ?name:string -> node -> node
val slice : ?name:string -> int list list -> node -> node
val lambda : ?name:string -> ?act_typ:Neuron.Activation.typ -> ?out_shape:int array -> (Neuron.Optimise.Algodiff.t -> Neuron.Optimise.Algodiff.t) -> node -> node
val lambda_array : ?name:string -> ?act_typ:Neuron.Activation.typ -> int array -> (Neuron.Optimise.Algodiff.t array -> Neuron.Optimise.Algodiff.t) -> node array -> node
val add : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val mul : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val dot : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val max : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val average : ?name:string -> ?act_typ:Neuron.Activation.typ -> node array -> node
val concatenate : ?name:string -> ?act_typ:Neuron.Activation.typ -> int -> node array -> node
val to_string : network -> string
val pp_network : Stdlib.Format.formatter -> network -> unit
val print : network -> unit
val save : ?unsafe:bool -> network -> string -> unit
val load : string -> network
val save_weights : network -> string -> unit
val load_weights : network -> string -> unit
val make_subnetwork : ?copy:bool -> ?make_inputs:string array -> network -> string array -> network
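The layer constructors above are meant to be chained with |>: each creates a node, connects it to its parent and returns it, and get_network closes the graph. A sketch of a small image classifier, assuming this module is reachable as Owl_neural.S.Graph and that Activation.typ has its customary Relu and Softmax constructors (not shown on this page).

open Owl_neural.S.Graph

let nn =
  input [| 28; 28; 1 |]
  |> conv2d [| 5; 5; 1; 32 |] [| 1; 1 |] ~act_typ:Neuron.Activation.Relu
  |> max_pool2d [| 2; 2 |] [| 2; 2 |]
  |> fully_connected 128 ~act_typ:Neuron.Activation.Relu
  |> linear 10 ~act_typ:(Neuron.Activation.Softmax 1)
  |> get_network

let () = print nn                        (* layer-by-layer summary *)
let () = save nn "mnist_cnn.model"       (* marshalled network on disk *)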
\ No newline at end of file diff --git a/owl/Owl_neural/S/index.html b/owl/Owl_neural/S/index.html deleted file mode 100644 index b762cd557..000000000 --- a/owl/Owl_neural/S/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -S (owl.Owl_neural.S)

Module Owl_neural.S

include sig ... end
module Graph : sig ... end
module Optimise = Graph.Neuron.Optimise
module Init = Graph.Neuron.Init
module Activation = Graph.Neuron.Activation
module Regularisation = Graph.Neuron.Optimise.Regularisation
\ No newline at end of file diff --git a/owl/Owl_neural_parallel/.dummy b/owl/Owl_neural_parallel/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_neural_parallel/Make/argument-1-M/index.html b/owl/Owl_neural_parallel/Make/argument-1-M/index.html deleted file mode 100644 index ce999513d..000000000 --- a/owl/Owl_neural_parallel/Make/argument-1-M/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -M (owl.Owl_neural_parallel.Make.M)

Parameter Make.M

type network
val mkpar : network -> Owl_algodiff.S.t array array
val init : network -> unit
val update : network -> Owl_algodiff.S.t array array -> unit
val copy : network -> network
val train_generic : ?state:Owl_optimise.S.Checkpoint.state -> ?params:Owl_optimise.S.Params.typ -> ?init_model:bool -> network -> Owl_algodiff.S.t -> Owl_algodiff.S.t -> Owl_optimise.S.Checkpoint.state
\ No newline at end of file diff --git a/owl/Owl_neural_parallel/Make/argument-2-E/index.html b/owl/Owl_neural_parallel/Make/argument-2-E/index.html deleted file mode 100644 index 0a08139c6..000000000 --- a/owl/Owl_neural_parallel/Make/argument-2-E/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -E (owl.Owl_neural_parallel.Make.E)

Parameter Make.E

type param_context
type barrier =
  1. | ASP
  2. | BSP
  3. | SSP
  4. | PSP
val get : 'a -> 'b * int
val set : 'a -> 'b -> unit
val worker_num : unit -> int
val start : ?barrier:barrier -> string -> string -> unit
val register_barrier : (param_context Stdlib.ref -> int * string list) -> unit
val register_schedule : ('a list -> ('a * ('b * 'c) list) list) -> unit
val register_pull : (('a * 'b) list -> ('a * 'c) list) -> unit
val register_push : ('a -> ('b * 'c) list -> ('b * 'c) list) -> unit
val register_stop : (param_context Stdlib.ref -> bool) -> unit
\ No newline at end of file diff --git a/owl/Owl_neural_parallel/Make/index.html b/owl/Owl_neural_parallel/Make/index.html deleted file mode 100644 index 9b0138b99..000000000 --- a/owl/Owl_neural_parallel/Make/index.html +++ /dev/null @@ -1,22 +0,0 @@ - -Make (owl.Owl_neural_parallel.Make)

Module Owl_neural_parallel.Make

Parameters

module M : ModelSig
module E : EngineSig

Signature

type task = {
  1. mutable id : int;
  2. mutable state : Owl_optimise.S.Checkpoint.state option;
  3. mutable params : Owl_optimise.S.Params.typ;
  4. mutable model : M.network;
  5. mutable data_x : Owl_algodiff.S.t;
  6. mutable data_y : Owl_algodiff.S.t;
}
val make_task : int -> Owl_optimise.S.Params.typ -> M.network -> Owl_algodiff.S.t -> Owl_algodiff.S.t -> task
val delta_model : M.network -> M.network -> unit
val local_model : task -> 'a
val schedule : task -> 'a list -> ('b * (int * 'c) list) list
val pull : task -> ('a * M.network) list -> ('b * 'c) list
val push : task -> 'a -> ('b * M.network) list -> ('c * M.network) list
val stop : 'a -> 'b -> bool
val train_generic : ?params:Owl_optimise.S.Params.typ -> M.network -> Owl_algodiff.S.t -> Owl_algodiff.S.t -> string -> string -> unit
val train : ?params:Owl_optimise.S.Params.typ -> M.network -> Owl_algodiff.S.A.arr -> Owl_algodiff.S.A.arr -> string -> string -> unit
\ No newline at end of file diff --git a/owl/Owl_neural_parallel/module-type-EngineSig/index.html b/owl/Owl_neural_parallel/module-type-EngineSig/index.html deleted file mode 100644 index 1d1f80a78..000000000 --- a/owl/Owl_neural_parallel/module-type-EngineSig/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -EngineSig (owl.Owl_neural_parallel.EngineSig)

Module type Owl_neural_parallel.EngineSig

type param_context
type barrier =
  1. | ASP
  2. | BSP
  3. | SSP
  4. | PSP
val get : 'a -> 'b * int
val set : 'a -> 'b -> unit
val worker_num : unit -> int
val start : ?barrier:barrier -> string -> string -> unit
val register_barrier : (param_context Stdlib.ref -> int * string list) -> unit
val register_schedule : ('a list -> ('a * ('b * 'c) list) list) -> unit
val register_pull : (('a * 'b) list -> ('a * 'c) list) -> unit
val register_push : ('a -> ('b * 'c) list -> ('b * 'c) list) -> unit
val register_stop : (param_context Stdlib.ref -> bool) -> unit
\ No newline at end of file diff --git a/owl/Owl_neural_parallel/module-type-ModelSig/index.html b/owl/Owl_neural_parallel/module-type-ModelSig/index.html deleted file mode 100644 index 1112a072a..000000000 --- a/owl/Owl_neural_parallel/module-type-ModelSig/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -ModelSig (owl.Owl_neural_parallel.ModelSig)

Module type Owl_neural_parallel.ModelSig

type network
val mkpar : network -> Owl_algodiff.S.t array array
val init : network -> unit
val update : network -> Owl_algodiff.S.t array array -> unit
val copy : network -> network
val train_generic : ?state:Owl_optimise.S.Checkpoint.state -> ?params:Owl_optimise.S.Params.typ -> ?init_model:bool -> network -> Owl_algodiff.S.t -> Owl_algodiff.S.t -> Owl_optimise.S.Checkpoint.state
\ No newline at end of file diff --git a/owl/Owl_nlp/.dummy b/owl/Owl_nlp/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_nlp_corpus/.dummy b/owl/Owl_nlp_corpus/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_nlp_lda/.dummy b/owl/Owl_nlp_lda/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_nlp_similarity/.dummy b/owl/Owl_nlp_similarity/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_nlp_tfidf/.dummy b/owl/Owl_nlp_tfidf/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_nlp_utils/.dummy b/owl/Owl_nlp_utils/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_nlp_vocabulary/.dummy b/owl/Owl_nlp_vocabulary/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_optimise/.dummy b/owl/Owl_optimise/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_optimise/D/Algodiff/A/Linalg/index.html b/owl/Owl_optimise/D/Algodiff/A/Linalg/index.html deleted file mode 100644 index 2dad5e619..000000000 --- a/owl/Owl_optimise/D/Algodiff/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl.Owl_optimise.D.Algodiff.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl/Owl_optimise/D/Algodiff/A/Mat/index.html b/owl/Owl_optimise/D/Algodiff/A/Mat/index.html deleted file mode 100644 index 068a6d39e..000000000 --- a/owl/Owl_optimise/D/Algodiff/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_optimise.D.Algodiff.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl/Owl_optimise/D/Algodiff/A/Scalar/index.html b/owl/Owl_optimise/D/Algodiff/A/Scalar/index.html deleted file mode 100644 index 21bc033b8..000000000 --- a/owl/Owl_optimise/D/Algodiff/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl.Owl_optimise.D.Algodiff.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl/Owl_optimise/D/Algodiff/A/index.html b/owl/Owl_optimise/D/Algodiff/A/index.html deleted file mode 100644 index b6efd7ebf..000000000 --- a/owl/Owl_optimise/D/Algodiff/A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl.Owl_optimise.D.Algodiff.A)

Module Algodiff.A

val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
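
The signatures above form the primal ndarray interface that Algodiff is built over. As a rough usage sketch (assuming, as is usual for the double-precision instantiation, that Owl.Dense.Ndarray.D satisfies this interface):

  let x = Owl.Dense.Ndarray.D.uniform [|3; 4|]                     (* 3x4 array, entries in [0, 1) *)
  let y = Owl.Dense.Ndarray.D.(add x (scalar_mul 2.0 (ones [|3; 4|])))
  let s = Owl.Dense.Ndarray.D.sum' y                               (* reduce to a single float *)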
\ No newline at end of file diff --git a/owl/Owl_optimise/D/Algodiff/Arr/index.html b/owl/Owl_optimise/D/Algodiff/Arr/index.html deleted file mode 100644 index 8acacd133..000000000 --- a/owl/Owl_optimise/D/Algodiff/Arr/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Arr (owl.Owl_optimise.D.Algodiff.Arr)

Module Algodiff.Arr

val empty : int array -> t
val zeros : int array -> t
val ones : int array -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int array -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int array -> t
val shape : t -> int array
val numel : t -> int
val reset : t -> unit
val reshape : t -> int array -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
\ No newline at end of file diff --git a/owl/Owl_optimise/D/Algodiff/Builder/index.html b/owl/Owl_optimise/D/Algodiff/Builder/index.html deleted file mode 100644 index bbf9c2870..000000000 --- a/owl/Owl_optimise/D/Algodiff/Builder/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Builder (owl.Owl_optimise.D.Algodiff.Builder)

Module Algodiff.Builder

module type Siso = sig ... end
val build_siso : (module Siso) -> t -> t
module type Sipo = sig ... end
val build_sipo : (module Sipo) -> t -> t * t
module type Sito = sig ... end
val build_sito : (module Sito) -> t -> t * t * t
module type Siao = sig ... end
val build_siao : (module Siao) -> t -> t array
module type Piso = sig ... end
val build_piso : (module Piso) -> t -> t -> t
module type Aiso = sig ... end
val build_aiso : (module Aiso) -> t array -> t
\ No newline at end of file diff --git a/owl/Owl_optimise/D/Algodiff/Builder/module-type-Aiso/index.html b/owl/Owl_optimise/D/Algodiff/Builder/module-type-Aiso/index.html deleted file mode 100644 index 23259726e..000000000 --- a/owl/Owl_optimise/D/Algodiff/Builder/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl.Owl_optimise.D.Algodiff.Builder.Aiso)

Module type Builder.Aiso

val label : string
val ff : t array -> t
val df : int list -> t -> t array -> t array -> t
val dr : int list -> t array -> t -> t Stdlib.ref -> t list
\ No newline at end of file diff --git a/owl/Owl_optimise/D/Algodiff/Builder/module-type-Piso/index.html b/owl/Owl_optimise/D/Algodiff/Builder/module-type-Piso/index.html deleted file mode 100644 index cfad91556..000000000 --- a/owl/Owl_optimise/D/Algodiff/Builder/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl.Owl_optimise.D.Algodiff.Builder.Piso)

Module type Builder.Piso

val label : string
val ff_aa : A.elt -> A.elt -> t
val ff_ab : A.elt -> A.arr -> t
val ff_ba : A.arr -> A.elt -> t
val ff_bb : A.arr -> A.arr -> t
val df_da : t -> t -> t -> t -> t
val df_db : t -> t -> t -> t -> t
val df_dab : t -> t -> t -> t -> t -> t
val dr_ab : t -> t -> t -> t Stdlib.ref -> t * t
val dr_a : t -> t -> t -> t Stdlib.ref -> t
val dr_b : t -> t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl/Owl_optimise/D/Algodiff/Builder/module-type-Siao/index.html b/owl/Owl_optimise/D/Algodiff/Builder/module-type-Siao/index.html deleted file mode 100644 index 20468fc53..000000000 --- a/owl/Owl_optimise/D/Algodiff/Builder/module-type-Siao/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siao (owl.Owl_optimise.D.Algodiff.Builder.Siao)

Module type Builder.Siao

val label : string
val ff_f : A.elt -> t array
val ff_arr : A.arr -> t array
val df : t array -> t -> t -> t array
val dr : t -> t -> t Stdlib.ref array -> t Stdlib.ref array -> t
\ No newline at end of file diff --git a/owl/Owl_optimise/D/Algodiff/Builder/module-type-Sipo/index.html b/owl/Owl_optimise/D/Algodiff/Builder/module-type-Sipo/index.html deleted file mode 100644 index 25ec2fb2a..000000000 --- a/owl/Owl_optimise/D/Algodiff/Builder/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl.Owl_optimise.D.Algodiff.Builder.Sipo)

Module type Builder.Sipo

val label : string
val ff_f : A.elt -> t * t
val ff_arr : A.arr -> t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl/Owl_optimise/D/Algodiff/Builder/module-type-Siso/index.html b/owl/Owl_optimise/D/Algodiff/Builder/module-type-Siso/index.html deleted file mode 100644 index b6aabac29..000000000 --- a/owl/Owl_optimise/D/Algodiff/Builder/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl.Owl_optimise.D.Algodiff.Builder.Siso)

Module type Builder.Siso

val label : string
val ff_f : A.elt -> t
val ff_arr : A.arr -> t
val df : t -> t -> t -> t
val dr : t -> t -> t Stdlib.ref -> t
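
As an illustration (not part of the original page), a custom unary operator can be assembled from these four functions and registered with Builder.build_siso. The sketch below assumes it is evaluated inside an Algodiff instance such as Owl.Algodiff.D, and the df/dr argument orders (output primal, input primal, input tangent; input, output primal, output adjoint ref) are an assumption about their intended meaning.

  open Owl.Algodiff.D

  (* hypothetical custom operator f(x) = x * sin x *)
  let x_sin_x =
    let module F = struct
      let label = "x_sin_x"
      let ff_f a = pack_elt A.Scalar.(mul a (sin a))              (* scalar forward pass *)
      let ff_arr a = pack_arr A.(mul a (sin a))                   (* ndarray forward pass *)
      let df _cp ap at = Maths.(at * (sin ap + ap * cos ap))      (* forward-mode tangent *)
      let dr a _cp ca = Maths.(!ca * (sin a + a * cos a))         (* reverse-mode adjoint *)
    end
    in
    Builder.build_siso (module F : Builder.Siso)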
\ No newline at end of file diff --git a/owl/Owl_optimise/D/Algodiff/Builder/module-type-Sito/index.html b/owl/Owl_optimise/D/Algodiff/Builder/module-type-Sito/index.html deleted file mode 100644 index 0ad14a797..000000000 --- a/owl/Owl_optimise/D/Algodiff/Builder/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl.Owl_optimise.D.Algodiff.Builder.Sito)

Module type Builder.Sito

val label : string
val ff_f : A.elt -> t * t * t
val ff_arr : A.arr -> t * t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl/Owl_optimise/D/Algodiff/Linalg/index.html b/owl/Owl_optimise/D/Algodiff/Linalg/index.html deleted file mode 100644 index 656649de2..000000000 --- a/owl/Owl_optimise/D/Algodiff/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl.Owl_optimise.D.Algodiff.Linalg)

Module Algodiff.Linalg

val inv : t -> t
val logdet : t -> t
val chol : ?upper:bool -> t -> t
val qr : t -> t * t
val lq : t -> t * t
val svd : ?thin:bool -> t -> t * t * t
val sylvester : t -> t -> t -> t
val lyapunov : t -> t -> t
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> t -> t -> t
val (/@) : t -> t -> t
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> t -> t -> t
val care : ?diag_r:bool -> t -> t -> t -> t -> t
val dare : ?diag_r:bool -> t -> t -> t -> t -> t
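
For orientation (not in the original page), these wrappers participate in differentiation like any other Maths operator. A minimal sketch, assuming Owl.Algodiff.D exposes this interface:

  open Owl.Algodiff.D

  let b = Mat.gaussian 4 1
  (* gradient of a -> ||linsolve a b||^2, evaluated at the identity *)
  let g = grad (fun a -> Maths.l2norm_sqr' (Linalg.linsolve a b)) (Mat.eye 4)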
\ No newline at end of file diff --git a/owl/Owl_optimise/D/Algodiff/Mat/index.html b/owl/Owl_optimise/D/Algodiff/Mat/index.html deleted file mode 100644 index 114a17f66..000000000 --- a/owl/Owl_optimise/D/Algodiff/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_optimise.D.Algodiff.Mat)

Module Algodiff.Mat

val empty : int -> int -> t
val zeros : int -> int -> t
val eye : int -> t
val ones : int -> int -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int -> int -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int -> int -> t
val shape : t -> int * int
val numel : t -> int
val row_num : t -> int
val col_num : t -> int
val reset : t -> unit
val reshape : int -> int -> t -> t
val get : t -> int -> int -> t
val set : t -> int -> int -> t -> t
val row : t -> int -> t
val mean : t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
val map_by_row : (t -> t) -> t -> t
val of_arrays : A.elt array array -> t
val init_2d : int -> int -> (int -> int -> t) -> t
val print : t -> unit
\ No newline at end of file diff --git a/owl/Owl_optimise/D/Algodiff/Maths/index.html b/owl/Owl_optimise/D/Algodiff/Maths/index.html deleted file mode 100644 index 862f8e05b..000000000 --- a/owl/Owl_optimise/D/Algodiff/Maths/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Maths (owl.Owl_optimise.D.Algodiff.Maths)

Module Algodiff.Maths

val (+) : t -> t -> t
val (-) : t -> t -> t
val (*) : t -> t -> t
val (/) : t -> t -> t
val (*@) : t -> t -> t
val (**) : t -> t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val kron : t -> t -> t
val dot : t -> t -> t
val pow : t -> t -> t
val atan2 : t -> t -> t
val min2 : t -> t -> t
val max2 : t -> t -> t
val cross_entropy : t -> t -> t
val inv : t -> t
val neg : t -> t
val abs : t -> t
val signum : t -> t
val floor : t -> t
val ceil : t -> t
val round : t -> t
val sqr : t -> t
val sqrt : t -> t
val log : t -> t
val log2 : t -> t
val log10 : t -> t
val exp : t -> t
val sin : t -> t
val cos : t -> t
val tan : t -> t
val sinh : t -> t
val cosh : t -> t
val tanh : t -> t
val asin : t -> t
val acos : t -> t
val atan : t -> t
val asinh : t -> t
val acosh : t -> t
val atanh : t -> t
val sum' : t -> t
val log_sum_exp' : t -> t
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> t -> t
val sum : ?axis:int -> ?keep_dims:bool -> t -> t
val sum_reduce : ?axis:int array -> t -> t
val mean : t -> t
val transpose : ?axis:int array -> t -> t
val swap : int -> int -> t -> t
val l1norm' : t -> t
val l2norm' : t -> t
val l2norm_sqr' : t -> t
val sigmoid : t -> t
val relu : t -> t
val dawsn : t -> t
val softplus : t -> t
val softsign : t -> t
val softmax : ?axis:int -> t -> t
val reshape : t -> int array -> t
val flatten : t -> t
val get_item : t -> int -> int -> t
val get_row : t -> int -> t
val concat : axis:int -> t -> t -> t
val split : axis:int -> int array -> t -> t array
val of_arrays : t array array -> t
val to_arrays : t -> t array array
val concatenate : axis:int -> t array -> t
val stack : axis:int -> t array -> t
val get_slice : int list list -> t -> t
val set_slice : int list list -> t -> t -> t
val get_fancy : Owl_types.index list -> t -> t
val set_fancy : Owl_types.index list -> t -> t -> t
val diag : ?k:int -> t -> t
val diagm : ?k:int -> t -> t
val trace : t -> t
val triu : ?k:int -> t -> t
val tril : ?k:int -> t -> t
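
Because these operators dispatch over both scalars and arrays wrapped in t, a loss can be written once and reused under diff/grad. A small sketch (names are illustrative), assuming this module is Owl.Algodiff.D.Maths:

  open Owl.Algodiff.D

  (* binary cross-entropy written directly with Maths operators *)
  let bce y_hat y =
    let one = pack_flt 1. in
    Maths.(neg (sum' ((y * log y_hat) + ((one - y) * log (one - y_hat)))))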
\ No newline at end of file diff --git a/owl/Owl_optimise/D/Algodiff/NN/index.html b/owl/Owl_optimise/D/Algodiff/NN/index.html deleted file mode 100644 index 022b5b0e2..000000000 --- a/owl/Owl_optimise/D/Algodiff/NN/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -NN (owl.Owl_optimise.D.Algodiff.NN)

Module Algodiff.NN

val dropout : ?rate:float -> t -> t
val conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val dilated_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val dilated_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val dilated_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val transpose_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val transpose_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val transpose_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val max_pool1d : Owl_types.padding -> t -> int array -> int array -> t
val max_pool2d : Owl_types.padding -> t -> int array -> int array -> t
val max_pool3d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool1d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool2d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool3d : Owl_types.padding -> t -> int array -> int array -> t
val upsampling2d : t -> int array -> t
val pad : ?v:A.elt -> int list list -> t -> t
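
As a shape-oriented sketch (not from the original page), assuming the NHWC layout Owl uses elsewhere, i.e. inputs [|batch; h; w; in_channels|], kernels [|kh; kw; in_channels; out_channels|], and strides [|stride_h; stride_w|]:

  open Owl.Algodiff.D

  let x = Arr.uniform [|8; 28; 28; 1|]        (* 8 single-channel 28x28 images *)
  let k = Arr.gaussian [|3; 3; 1; 16|]        (* 3x3 kernel mapping 1 -> 16 channels *)
  let y = NN.conv2d ~padding:Owl_types.SAME x k [|1; 1|]
  let z = NN.max_pool2d Owl_types.SAME y [|2; 2|] [|2; 2|]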
\ No newline at end of file diff --git a/owl/Owl_optimise/D/Algodiff/index.html b/owl/Owl_optimise/D/Algodiff/index.html deleted file mode 100644 index b731af528..000000000 --- a/owl/Owl_optimise/D/Algodiff/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Algodiff (owl.Owl_optimise.D.Algodiff)

Module D.Algodiff

module A : sig ... end
type t = Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.D).t =
  1. | F of A.elt
  2. | Arr of A.arr
  3. | DF of t * t * int
  4. | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
val tag : unit -> int
val primal : t -> t
val primal' : t -> t
val zero : t -> t
val reset_zero : t -> t
val tangent : t -> t
val adjref : t -> t Stdlib.ref
val adjval : t -> t
val shape : t -> int array
val is_float : t -> bool
val is_arr : t -> bool
val row_num : t -> int
val col_num : t -> int
val numel : t -> int
val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t
val clip_by_l2norm : A.elt -> t -> t
val copy_primal' : t -> t
val tile : t -> int array -> t
val repeat : t -> int array -> t
val pack_elt : A.elt -> t
val unpack_elt : t -> A.elt
val pack_flt : float -> t
val _f : float -> t
val unpack_flt : t -> float
val pack_arr : A.arr -> t
val unpack_arr : t -> A.arr
val deep_info : t -> string
val type_info : t -> string
val error_binop : string -> t -> t -> 'a
val error_uniop : string -> t -> 'a
val make_forward : t -> t -> int -> t
val make_reverse : t -> int -> t
val reverse_prop : t -> t -> unit
val diff : (t -> t) -> t -> t
val diff' : (t -> t) -> t -> t * t
val grad : (t -> t) -> t -> t
val grad' : (t -> t) -> t -> t * t
val jacobian : (t -> t) -> t -> t
val jacobian' : (t -> t) -> t -> t * t
val jacobianv : (t -> t) -> t -> t -> t
val jacobianv' : (t -> t) -> t -> t -> t * t
val jacobianTv : (t -> t) -> t -> t -> t
val jacobianTv' : (t -> t) -> t -> t -> t * t
val hessian : (t -> t) -> t -> t
val hessian' : (t -> t) -> t -> t * t
val hessianv : (t -> t) -> t -> t -> t
val hessianv' : (t -> t) -> t -> t -> t * t
val laplacian : (t -> t) -> t -> t
val laplacian' : (t -> t) -> t -> t * t
val gradhessian : (t -> t) -> t -> t * t
val gradhessian' : (t -> t) -> t -> t * t * t
val gradhessianv : (t -> t) -> t -> t -> t * t
val gradhessianv' : (t -> t) -> t -> t -> t * t * t
module Builder : sig ... end
module Maths : sig ... end
module Linalg : sig ... end
module NN : sig ... end
module Mat : sig ... end
module Arr : sig ... end
val to_trace : t list -> string
val to_dot : t list -> string
val pp_num : Stdlib.Format.formatter -> t -> unit
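
As a usage sketch (not part of the original page), the higher-order functions above drive both modes of differentiation; assuming this module is Owl.Algodiff.D:

  open Owl.Algodiff.D

  let f x = Maths.(sin x * exp (neg (sqr x)))
  let f' = diff f                                    (* derivative, again of type t -> t *)
  let () = Printf.printf "%g\n" (unpack_flt (f' (pack_flt 0.5)))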
\ No newline at end of file diff --git a/owl/Owl_optimise/D/Batch/index.html b/owl/Owl_optimise/D/Batch/index.html deleted file mode 100644 index fdb54506f..000000000 --- a/owl/Owl_optimise/D/Batch/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Batch (owl.Owl_optimise.D.Batch)

Module D.Batch

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.D)).Batch.typ =
  1. | Full
  2. | Mini of int
  3. | Sample of int
  4. | Stochastic
val run : typ -> Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t
val batches : typ -> Algodiff.t -> int
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_optimise/D/Checkpoint/index.html b/owl/Owl_optimise/D/Checkpoint/index.html deleted file mode 100644 index 4d7f9116c..000000000 --- a/owl/Owl_optimise/D/Checkpoint/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Checkpoint (owl.Owl_optimise.D.Checkpoint)

Module D.Checkpoint

type state = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.D)).Checkpoint.state = {
  1. mutable current_batch : int;
  2. mutable batches_per_epoch : int;
  3. mutable epochs : float;
  4. mutable batches : int;
  5. mutable loss : Algodiff.t array;
  6. mutable start_at : float;
  7. mutable stop : bool;
  8. mutable gs : Algodiff.t array array;
  9. mutable ps : Algodiff.t array array;
  10. mutable us : Algodiff.t array array;
  11. mutable ch : Algodiff.t array array array;
}
type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.D)).Checkpoint.typ =
  1. | Batch of int
  2. | Epoch of float
  3. | Custom of state -> unit
  4. | None
val init_state : int -> float -> state
val default_checkpoint_fun : (string -> 'a) -> 'a
val print_state_info : state -> unit
val print_summary : state -> unit
val run : typ -> (string -> unit) -> int -> Algodiff.t -> state -> unit
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_optimise/D/Clipping/index.html b/owl/Owl_optimise/D/Clipping/index.html deleted file mode 100644 index 6420184e1..000000000 --- a/owl/Owl_optimise/D/Clipping/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Clipping (owl.Owl_optimise.D.Clipping)

Module D.Clipping

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.D)).Clipping.typ =
  1. | L2norm of float
  2. | Value of float * float
  3. | None
val run : typ -> Algodiff.t -> Algodiff.t
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_optimise/D/Gradient/index.html b/owl/Owl_optimise/D/Gradient/index.html deleted file mode 100644 index 302450603..000000000 --- a/owl/Owl_optimise/D/Gradient/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -Gradient (owl.Owl_optimise.D.Gradient)

Module D.Gradient

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.D)).Gradient.typ =
  1. | GD
  2. | CG
  3. | CD
  4. | NonlinearCG
  5. | DaiYuanCG
  6. | NewtonCG
  7. | Newton
val run : typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_optimise/D/Learning_Rate/index.html b/owl/Owl_optimise/D/Learning_Rate/index.html deleted file mode 100644 index 985aee32e..000000000 --- a/owl/Owl_optimise/D/Learning_Rate/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Learning_Rate (owl.Owl_optimise.D.Learning_Rate)

Module D.Learning_Rate

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.D)).Learning_Rate.typ =
  1. | Adagrad of float
  2. | Const of float
  3. | Decay of float * float
  4. | Exp_decay of float * float
  5. | RMSprop of float * float
  6. | Adam of float * float * float
  7. | Schedule of float array
val run : typ -> int -> Algodiff.t -> Algodiff.t array -> Algodiff.t
val default : typ -> typ
val update_ch : typ -> Algodiff.t -> Algodiff.t array -> Algodiff.t array
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_optimise/D/Loss/index.html b/owl/Owl_optimise/D/Loss/index.html deleted file mode 100644 index 3b6571ad9..000000000 --- a/owl/Owl_optimise/D/Loss/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Loss (owl.Owl_optimise.D.Loss)

Module D.Loss

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.D)).Loss.typ =
  1. | Hinge
  2. | L1norm
  3. | L2norm
  4. | Quadratic
  5. | Cross_entropy
  6. | Custom of Algodiff.t -> Algodiff.t -> Algodiff.t
val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_optimise/D/Momentum/index.html b/owl/Owl_optimise/D/Momentum/index.html deleted file mode 100644 index dd204cb76..000000000 --- a/owl/Owl_optimise/D/Momentum/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Momentum (owl.Owl_optimise.D.Momentum)

Module D.Momentum

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.D)).Momentum.typ =
  1. | Standard of float
  2. | Nesterov of float
  3. | None
val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_optimise/D/Params/index.html b/owl/Owl_optimise/D/Params/index.html deleted file mode 100644 index bc2d7797c..000000000 --- a/owl/Owl_optimise/D/Params/index.html +++ /dev/null @@ -1,16 +0,0 @@ - -Params (owl.Owl_optimise.D.Params)

Module D.Params

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.D)).Params.typ = {
  1. mutable epochs : float;
  2. mutable batch : Batch.typ;
  3. mutable gradient : Gradient.typ;
  4. mutable loss : Loss.typ;
  5. mutable learning_rate : Learning_Rate.typ;
  6. mutable regularisation : Regularisation.typ;
  7. mutable momentum : Momentum.typ;
  8. mutable clipping : Clipping.typ;
  9. mutable stopping : Stopping.typ;
  10. mutable checkpoint : Checkpoint.typ;
  11. mutable verbosity : bool;
}
val default : unit -> typ
val config : ?batch:Batch.typ -> ?gradient:Gradient.typ -> ?loss:Loss.typ -> ?learning_rate:Learning_Rate.typ -> ?regularisation:Regularisation.typ -> ?momentum:Momentum.typ -> ?clipping:Clipping.typ -> ?stopping:Stopping.typ -> ?checkpoint:Checkpoint.typ -> ?verbosity:bool -> float -> typ
val to_string : typ -> string
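
For orientation (not in the original page), a configuration is usually built through config, leaving unlisted fields at their defaults; the concrete values below are purely illustrative:

  let params =
    let open Owl_optimise.D in
    Params.config
      ~batch:(Batch.Mini 100)
      ~learning_rate:(Learning_Rate.Adagrad 0.005)
      ~loss:Loss.Cross_entropy
      ~stopping:(Stopping.Const 1e-6)
      10.                                   (* epochs, the final positional argument *)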
\ No newline at end of file diff --git a/owl/Owl_optimise/D/Regularisation/index.html b/owl/Owl_optimise/D/Regularisation/index.html deleted file mode 100644 index 4b323d014..000000000 --- a/owl/Owl_optimise/D/Regularisation/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Regularisation (owl.Owl_optimise.D.Regularisation)

Module D.Regularisation

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.D)).Regularisation.typ =
  1. | L1norm of float
  2. | L2norm of float
  3. | Elastic_net of float * float
  4. | None
val run : typ -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_optimise/D/Stopping/index.html b/owl/Owl_optimise/D/Stopping/index.html deleted file mode 100644 index b0a68cc5e..000000000 --- a/owl/Owl_optimise/D/Stopping/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Stopping (owl.Owl_optimise.D.Stopping)

Module D.Stopping

val run : typ -> float -> bool
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_optimise/D/Utils/index.html b/owl/Owl_optimise/D/Utils/index.html deleted file mode 100644 index 243548866..000000000 --- a/owl/Owl_optimise/D/Utils/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Utils (owl.Owl_optimise.D.Utils)

Module D.Utils

val sample_num : Algodiff.t -> int
val draw_samples : Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t
val get_chunk : Algodiff.t -> Algodiff.t -> int -> int -> Algodiff.t * Algodiff.t
\ No newline at end of file diff --git a/owl/Owl_optimise/D/index.html b/owl/Owl_optimise/D/index.html deleted file mode 100644 index 49e02a2bb..000000000 --- a/owl/Owl_optimise/D/index.html +++ /dev/null @@ -1,31 +0,0 @@ - -D (owl.Owl_optimise.D)

Module Owl_optimise.D

module Algodiff : sig ... end
module Utils : sig ... end
module Learning_Rate : sig ... end
module Batch : sig ... end
module Loss : sig ... end
module Gradient : sig ... end
module Momentum : sig ... end
module Regularisation : sig ... end
module Clipping : sig ... end
module Stopping : sig ... end
module Checkpoint : sig ... end
module Params : sig ... end
val minimise_weight : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Checkpoint.state * Algodiff.t
val minimise_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t * Algodiff.t array array) -> (Algodiff.t -> Algodiff.t array array * Algodiff.t array array) -> (Algodiff.t array array -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state
val minimise_fun : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Checkpoint.state * Algodiff.t
val minimise_compiled_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> (unit -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state
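
As a small end-to-end sketch (not part of the original page), minimise_fun can be pointed at any differentiable scalar function; the function, starting point, and printed output below are illustrative:

  let () =
    let open Owl_optimise.D in
    let f x = Algodiff.Maths.(sqr (x - Algodiff.pack_flt 3.)) in    (* (x - 3)^2 *)
    let prms = Params.default () in
    let _state, x_star = minimise_fun prms f (Algodiff.pack_flt 0.) in
    Printf.printf "argmin ~ %g\n" (Algodiff.unpack_flt x_star)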
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Algodiff/A/Linalg/index.html b/owl/Owl_optimise/Make_Embedded/Algodiff/A/Linalg/index.html deleted file mode 100644 index e801acb75..000000000 --- a/owl/Owl_optimise/Make_Embedded/Algodiff/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl.Owl_optimise.Make_Embedded.Algodiff.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Algodiff/A/Mat/index.html b/owl/Owl_optimise/Make_Embedded/Algodiff/A/Mat/index.html deleted file mode 100644 index 2b631ba02..000000000 --- a/owl/Owl_optimise/Make_Embedded/Algodiff/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_optimise.Make_Embedded.Algodiff.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Algodiff/A/Scalar/index.html b/owl/Owl_optimise/Make_Embedded/Algodiff/A/Scalar/index.html deleted file mode 100644 index 91f7c79fa..000000000 --- a/owl/Owl_optimise/Make_Embedded/Algodiff/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl.Owl_optimise.Make_Embedded.Algodiff.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Algodiff/A/index.html b/owl/Owl_optimise/Make_Embedded/Algodiff/A/index.html deleted file mode 100644 index 512e9c2be..000000000 --- a/owl/Owl_optimise/Make_Embedded/Algodiff/A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl.Owl_optimise.Make_Embedded.Algodiff.A)

Module Algodiff.A

val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Algodiff/Arr/index.html b/owl/Owl_optimise/Make_Embedded/Algodiff/Arr/index.html deleted file mode 100644 index 6c637f23e..000000000 --- a/owl/Owl_optimise/Make_Embedded/Algodiff/Arr/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Arr (owl.Owl_optimise.Make_Embedded.Algodiff.Arr)

Module Algodiff.Arr

val empty : int array -> t
val zeros : int array -> t
val ones : int array -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int array -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int array -> t
val shape : t -> int array
val numel : t -> int
val reset : t -> unit
val reshape : t -> int array -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Algodiff/Builder/index.html b/owl/Owl_optimise/Make_Embedded/Algodiff/Builder/index.html deleted file mode 100644 index 4569125f2..000000000 --- a/owl/Owl_optimise/Make_Embedded/Algodiff/Builder/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Builder (owl.Owl_optimise.Make_Embedded.Algodiff.Builder)

Module Algodiff.Builder

module type Siso = sig ... end
val build_siso : (module Siso) -> t -> t
module type Sipo = sig ... end
val build_sipo : (module Sipo) -> t -> t * t
module type Sito = sig ... end
val build_sito : (module Sito) -> t -> t * t * t
module type Siao = sig ... end
val build_siao : (module Siao) -> t -> t array
module type Piso = sig ... end
val build_piso : (module Piso) -> t -> t -> t
module type Aiso = sig ... end
val build_aiso : (module Aiso) -> t array -> t
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Algodiff/Builder/module-type-Aiso/index.html b/owl/Owl_optimise/Make_Embedded/Algodiff/Builder/module-type-Aiso/index.html deleted file mode 100644 index f9c5d80db..000000000 --- a/owl/Owl_optimise/Make_Embedded/Algodiff/Builder/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl.Owl_optimise.Make_Embedded.Algodiff.Builder.Aiso)

Module type Builder.Aiso

val label : string
val ff : t array -> t
val df : int list -> t -> t array -> t array -> t
val dr : int list -> t array -> t -> t Stdlib.ref -> t list
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Algodiff/Builder/module-type-Piso/index.html b/owl/Owl_optimise/Make_Embedded/Algodiff/Builder/module-type-Piso/index.html deleted file mode 100644 index 83753799b..000000000 --- a/owl/Owl_optimise/Make_Embedded/Algodiff/Builder/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl.Owl_optimise.Make_Embedded.Algodiff.Builder.Piso)

Module type Builder.Piso

val label : string
val ff_aa : A.elt -> A.elt -> t
val ff_ab : A.elt -> A.arr -> t
val ff_ba : A.arr -> A.elt -> t
val ff_bb : A.arr -> A.arr -> t
val df_da : t -> t -> t -> t -> t
val df_db : t -> t -> t -> t -> t
val df_dab : t -> t -> t -> t -> t -> t
val dr_ab : t -> t -> t -> t Stdlib.ref -> t * t
val dr_a : t -> t -> t -> t Stdlib.ref -> t
val dr_b : t -> t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Algodiff/Builder/module-type-Siao/index.html b/owl/Owl_optimise/Make_Embedded/Algodiff/Builder/module-type-Siao/index.html deleted file mode 100644 index 1294ab9d5..000000000 --- a/owl/Owl_optimise/Make_Embedded/Algodiff/Builder/module-type-Siao/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siao (owl.Owl_optimise.Make_Embedded.Algodiff.Builder.Siao)

Module type Builder.Siao

val label : string
val ff_f : A.elt -> t array
val ff_arr : A.arr -> t array
val df : t array -> t -> t -> t array
val dr : t -> t -> t Stdlib.ref array -> t Stdlib.ref array -> t
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Algodiff/Builder/module-type-Sipo/index.html b/owl/Owl_optimise/Make_Embedded/Algodiff/Builder/module-type-Sipo/index.html deleted file mode 100644 index b855fa3f7..000000000 --- a/owl/Owl_optimise/Make_Embedded/Algodiff/Builder/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl.Owl_optimise.Make_Embedded.Algodiff.Builder.Sipo)

Module type Builder.Sipo

val label : string
val ff_f : A.elt -> t * t
val ff_arr : A.arr -> t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Algodiff/Builder/module-type-Siso/index.html b/owl/Owl_optimise/Make_Embedded/Algodiff/Builder/module-type-Siso/index.html deleted file mode 100644 index b107d708d..000000000 --- a/owl/Owl_optimise/Make_Embedded/Algodiff/Builder/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl.Owl_optimise.Make_Embedded.Algodiff.Builder.Siso)

Module type Builder.Siso

val label : string
val ff_f : A.elt -> t
val ff_arr : A.arr -> t
val df : t -> t -> t -> t
val dr : t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Algodiff/Builder/module-type-Sito/index.html b/owl/Owl_optimise/Make_Embedded/Algodiff/Builder/module-type-Sito/index.html deleted file mode 100644 index a75a627c4..000000000 --- a/owl/Owl_optimise/Make_Embedded/Algodiff/Builder/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl.Owl_optimise.Make_Embedded.Algodiff.Builder.Sito)

Module type Builder.Sito

val label : string
val ff_f : A.elt -> t * t * t
val ff_arr : A.arr -> t * t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Algodiff/Linalg/index.html b/owl/Owl_optimise/Make_Embedded/Algodiff/Linalg/index.html deleted file mode 100644 index 06bb11a13..000000000 --- a/owl/Owl_optimise/Make_Embedded/Algodiff/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl.Owl_optimise.Make_Embedded.Algodiff.Linalg)

Module Algodiff.Linalg

val inv : t -> t
val logdet : t -> t
val chol : ?upper:bool -> t -> t
val qr : t -> t * t
val lq : t -> t * t
val svd : ?thin:bool -> t -> t * t * t
val sylvester : t -> t -> t -> t
val lyapunov : t -> t -> t
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> t -> t -> t
val (/@) : t -> t -> t
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> t -> t -> t
val care : ?diag_r:bool -> t -> t -> t -> t -> t
val dare : ?diag_r:bool -> t -> t -> t -> t -> t
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Algodiff/Mat/index.html b/owl/Owl_optimise/Make_Embedded/Algodiff/Mat/index.html deleted file mode 100644 index cea721c5c..000000000 --- a/owl/Owl_optimise/Make_Embedded/Algodiff/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_optimise.Make_Embedded.Algodiff.Mat)

Module Algodiff.Mat

val empty : int -> int -> t
val zeros : int -> int -> t
val eye : int -> t
val ones : int -> int -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int -> int -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int -> int -> t
val shape : t -> int * int
val numel : t -> int
val row_num : t -> int
val col_num : t -> int
val reset : t -> unit
val reshape : int -> int -> t -> t
val get : t -> int -> int -> t
val set : t -> int -> int -> t -> t
val row : t -> int -> t
val mean : t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
val map_by_row : (t -> t) -> t -> t
val of_arrays : A.elt array array -> t
val init_2d : int -> int -> (int -> int -> t) -> t
val print : t -> unit
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Algodiff/Maths/index.html b/owl/Owl_optimise/Make_Embedded/Algodiff/Maths/index.html deleted file mode 100644 index 9c792fbab..000000000 --- a/owl/Owl_optimise/Make_Embedded/Algodiff/Maths/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Maths (owl.Owl_optimise.Make_Embedded.Algodiff.Maths)

Module Algodiff.Maths

val (+) : t -> t -> t
val (-) : t -> t -> t
val (*) : t -> t -> t
val (/) : t -> t -> t
val (*@) : t -> t -> t
val (**) : t -> t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val kron : t -> t -> t
val dot : t -> t -> t
val pow : t -> t -> t
val atan2 : t -> t -> t
val min2 : t -> t -> t
val max2 : t -> t -> t
val cross_entropy : t -> t -> t
val inv : t -> t
val neg : t -> t
val abs : t -> t
val signum : t -> t
val floor : t -> t
val ceil : t -> t
val round : t -> t
val sqr : t -> t
val sqrt : t -> t
val log : t -> t
val log2 : t -> t
val log10 : t -> t
val exp : t -> t
val sin : t -> t
val cos : t -> t
val tan : t -> t
val sinh : t -> t
val cosh : t -> t
val tanh : t -> t
val asin : t -> t
val acos : t -> t
val atan : t -> t
val asinh : t -> t
val acosh : t -> t
val atanh : t -> t
val sum' : t -> t
val log_sum_exp' : t -> t
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> t -> t
val sum : ?axis:int -> ?keep_dims:bool -> t -> t
val sum_reduce : ?axis:int array -> t -> t
val mean : t -> t
val transpose : ?axis:int array -> t -> t
val swap : int -> int -> t -> t
val l1norm' : t -> t
val l2norm' : t -> t
val l2norm_sqr' : t -> t
val sigmoid : t -> t
val relu : t -> t
val dawsn : t -> t
val softplus : t -> t
val softsign : t -> t
val softmax : ?axis:int -> t -> t
val reshape : t -> int array -> t
val flatten : t -> t
val get_item : t -> int -> int -> t
val get_row : t -> int -> t
val concat : axis:int -> t -> t -> t
val split : axis:int -> int array -> t -> t array
val of_arrays : t array array -> t
val to_arrays : t -> t array array
val concatenate : axis:int -> t array -> t
val stack : axis:int -> t array -> t
val get_slice : int list list -> t -> t
val set_slice : int list list -> t -> t -> t
val get_fancy : Owl_types.index list -> t -> t
val set_fancy : Owl_types.index list -> t -> t -> t
val diag : ?k:int -> t -> t
val diagm : ?k:int -> t -> t
val trace : t -> t
val triu : ?k:int -> t -> t
val tril : ?k:int -> t -> t
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Algodiff/NN/index.html b/owl/Owl_optimise/Make_Embedded/Algodiff/NN/index.html deleted file mode 100644 index 3b3e30971..000000000 --- a/owl/Owl_optimise/Make_Embedded/Algodiff/NN/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -NN (owl.Owl_optimise.Make_Embedded.Algodiff.NN)

Module Algodiff.NN

val dropout : ?rate:float -> t -> t
val conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val dilated_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val dilated_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val dilated_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val transpose_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val transpose_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val transpose_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val max_pool1d : Owl_types.padding -> t -> int array -> int array -> t
val max_pool2d : Owl_types.padding -> t -> int array -> int array -> t
val max_pool3d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool1d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool2d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool3d : Owl_types.padding -> t -> int array -> int array -> t
val upsampling2d : t -> int array -> t
val pad : ?v:A.elt -> int list list -> t -> t
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Algodiff/index.html b/owl/Owl_optimise/Make_Embedded/Algodiff/index.html deleted file mode 100644 index 290cf8b7a..000000000 --- a/owl/Owl_optimise/Make_Embedded/Algodiff/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Algodiff (owl.Owl_optimise.Make_Embedded.Algodiff)

Module Make_Embedded.Algodiff

module A : sig ... end
type t = Owl_algodiff_generic.Make(A).t =
  1. | F of A.elt
  2. | Arr of A.arr
  3. | DF of t * t * int
  4. | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
val tag : unit -> int
val primal : t -> t
val primal' : t -> t
val zero : t -> t
val reset_zero : t -> t
val tangent : t -> t
val adjref : t -> t Stdlib.ref
val adjval : t -> t
val shape : t -> int array
val is_float : t -> bool
val is_arr : t -> bool
val row_num : t -> int
val col_num : t -> int
val numel : t -> int
val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t
val clip_by_l2norm : A.elt -> t -> t
val copy_primal' : t -> t
val tile : t -> int array -> t
val repeat : t -> int array -> t
val pack_elt : A.elt -> t
val unpack_elt : t -> A.elt
val pack_flt : float -> t
val _f : float -> t
val unpack_flt : t -> float
val pack_arr : A.arr -> t
val unpack_arr : t -> A.arr
val deep_info : t -> string
val type_info : t -> string
val error_binop : string -> t -> t -> 'a
val error_uniop : string -> t -> 'a
val make_forward : t -> t -> int -> t
val make_reverse : t -> int -> t
val reverse_prop : t -> t -> unit
val diff : (t -> t) -> t -> t
val diff' : (t -> t) -> t -> t * t
val grad : (t -> t) -> t -> t
val grad' : (t -> t) -> t -> t * t
val jacobian : (t -> t) -> t -> t
val jacobian' : (t -> t) -> t -> t * t
val jacobianv : (t -> t) -> t -> t -> t
val jacobianv' : (t -> t) -> t -> t -> t * t
val jacobianTv : (t -> t) -> t -> t -> t
val jacobianTv' : (t -> t) -> t -> t -> t * t
val hessian : (t -> t) -> t -> t
val hessian' : (t -> t) -> t -> t * t
val hessianv : (t -> t) -> t -> t -> t
val hessianv' : (t -> t) -> t -> t -> t * t
val laplacian : (t -> t) -> t -> t
val laplacian' : (t -> t) -> t -> t * t
val gradhessian : (t -> t) -> t -> t * t
val gradhessian' : (t -> t) -> t -> t * t * t
val gradhessianv : (t -> t) -> t -> t -> t * t
val gradhessianv' : (t -> t) -> t -> t -> t * t * t
module Builder : sig ... end
module Maths : sig ... end
module Linalg : sig ... end
module NN : sig ... end
module Mat : sig ... end
module Arr : sig ... end
val to_trace : t list -> string
val to_dot : t list -> string
val pp_num : Stdlib.Format.formatter -> t -> unit
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Batch/index.html b/owl/Owl_optimise/Make_Embedded/Batch/index.html deleted file mode 100644 index 43c046e09..000000000 --- a/owl/Owl_optimise/Make_Embedded/Batch/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Batch (owl.Owl_optimise.Make_Embedded.Batch)

Module Make_Embedded.Batch

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A)).Batch.typ =
  1. | Full
  2. | Mini of int
  3. | Sample of int
  4. | Stochastic
val run : typ -> Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t
val batches : typ -> Algodiff.t -> int
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Checkpoint/index.html b/owl/Owl_optimise/Make_Embedded/Checkpoint/index.html deleted file mode 100644 index 85d9d9e13..000000000 --- a/owl/Owl_optimise/Make_Embedded/Checkpoint/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Checkpoint (owl.Owl_optimise.Make_Embedded.Checkpoint)

Module Make_Embedded.Checkpoint

type state = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A)).Checkpoint.state = {
  1. mutable current_batch : int;
  2. mutable batches_per_epoch : int;
  3. mutable epochs : float;
  4. mutable batches : int;
  5. mutable loss : Algodiff.t array;
  6. mutable start_at : float;
  7. mutable stop : bool;
  8. mutable gs : Algodiff.t array array;
  9. mutable ps : Algodiff.t array array;
  10. mutable us : Algodiff.t array array;
  11. mutable ch : Algodiff.t array array array;
}
type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A)).Checkpoint.typ =
  1. | Batch of int
  2. | Epoch of float
  3. | Custom of state -> unit
  4. | None
val init_state : int -> float -> state
val default_checkpoint_fun : (string -> 'a) -> 'a
val print_state_info : state -> unit
val print_summary : state -> unit
val run : typ -> (string -> unit) -> int -> Algodiff.t -> state -> unit
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Clipping/index.html b/owl/Owl_optimise/Make_Embedded/Clipping/index.html deleted file mode 100644 index 3da754f63..000000000 --- a/owl/Owl_optimise/Make_Embedded/Clipping/index.html +++ /dev/null @@ -1,3 +0,0 @@ - -Clipping (owl.Owl_optimise.Make_Embedded.Clipping)

Module Make_Embedded.Clipping

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A)).Clipping.typ =
  1. | L2norm of float
  2. | Value of float * float
  3. | None
val run : typ -> Algodiff.t -> Algodiff.t
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Gradient/index.html b/owl/Owl_optimise/Make_Embedded/Gradient/index.html deleted file mode 100644 index 382f97db1..000000000 --- a/owl/Owl_optimise/Make_Embedded/Gradient/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -Gradient (owl.Owl_optimise.Make_Embedded.Gradient)

Module Make_Embedded.Gradient

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A)).Gradient.typ =
  1. | GD
  2. | CG
  3. | CD
  4. | NonlinearCG
  5. | DaiYuanCG
  6. | NewtonCG
  7. | Newton
val run : typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Learning_Rate/index.html b/owl/Owl_optimise/Make_Embedded/Learning_Rate/index.html deleted file mode 100644 index be7e5aafe..000000000 --- a/owl/Owl_optimise/Make_Embedded/Learning_Rate/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Learning_Rate (owl.Owl_optimise.Make_Embedded.Learning_Rate)

Module Make_Embedded.Learning_Rate

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A)).Learning_Rate.typ =
  1. | Adagrad of float
  2. | Const of float
  3. | Decay of float * float
  4. | Exp_decay of float * float
  5. | RMSprop of float * float
  6. | Adam of float * float * float
  7. | Schedule of float array
val run : typ -> int -> Algodiff.t -> Algodiff.t array -> Algodiff.t
val default : typ -> typ
val update_ch : typ -> Algodiff.t -> Algodiff.t array -> Algodiff.t array
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Loss/index.html b/owl/Owl_optimise/Make_Embedded/Loss/index.html deleted file mode 100644 index d2398ecc0..000000000 --- a/owl/Owl_optimise/Make_Embedded/Loss/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Loss (owl.Owl_optimise.Make_Embedded.Loss)

Module Make_Embedded.Loss

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A)).Loss.typ =
  1. | Hinge
  2. | L1norm
  3. | L2norm
  4. | Quadratic
  5. | Cross_entropy
  6. | Custom of Algodiff.t -> Algodiff.t -> Algodiff.t
val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Momentum/index.html b/owl/Owl_optimise/Make_Embedded/Momentum/index.html deleted file mode 100644 index 826b27b30..000000000 --- a/owl/Owl_optimise/Make_Embedded/Momentum/index.html +++ /dev/null @@ -1,3 +0,0 @@ - -Momentum (owl.Owl_optimise.Make_Embedded.Momentum)

Module Make_Embedded.Momentum

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A)).Momentum.typ =
  1. | Standard of float
  2. | Nesterov of float
  3. | None
val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Params/index.html b/owl/Owl_optimise/Make_Embedded/Params/index.html deleted file mode 100644 index bf3be0306..000000000 --- a/owl/Owl_optimise/Make_Embedded/Params/index.html +++ /dev/null @@ -1,14 +0,0 @@ - -Params (owl.Owl_optimise.Make_Embedded.Params)

Module Make_Embedded.Params

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A)).Params.typ = {
  1. mutable epochs : float;
  2. mutable batch : Batch.typ;
  3. mutable gradient : Gradient.typ;
  4. mutable loss : Loss.typ;
  5. mutable learning_rate : Learning_Rate.typ;
  6. mutable regularisation : Regularisation.typ;
  7. mutable momentum : Momentum.typ;
  8. mutable clipping : Clipping.typ;
  9. mutable stopping : Stopping.typ;
  10. mutable checkpoint : Checkpoint.typ;
  11. mutable verbosity : bool;
}
val default : unit -> typ
val config : ?batch:Batch.typ -> ?gradient:Gradient.typ -> ?loss:Loss.typ -> ?learning_rate:Learning_Rate.typ -> ?regularisation:Regularisation.typ -> ?momentum:Momentum.typ -> ?clipping:Clipping.typ -> ?stopping:Stopping.typ -> ?checkpoint:Checkpoint.typ -> ?verbosity:bool -> float -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Regularisation/index.html b/owl/Owl_optimise/Make_Embedded/Regularisation/index.html deleted file mode 100644 index a33d32888..000000000 --- a/owl/Owl_optimise/Make_Embedded/Regularisation/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Regularisation (owl.Owl_optimise.Make_Embedded.Regularisation)

Module Make_Embedded.Regularisation

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A)).Regularisation.typ =
  1. | L1norm of float
  2. | L2norm of float
  3. | Elastic_net of float * float
  4. | None
val run : typ -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Stopping/index.html b/owl/Owl_optimise/Make_Embedded/Stopping/index.html deleted file mode 100644 index c110bdbe3..000000000 --- a/owl/Owl_optimise/Make_Embedded/Stopping/index.html +++ /dev/null @@ -1,3 +0,0 @@ - -Stopping (owl.Owl_optimise.Make_Embedded.Stopping)

Module Make_Embedded.Stopping

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A)).Stopping.typ =
  1. | Const of float
  2. | Early of int * int
  3. | None
val run : typ -> float -> bool
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/Utils/index.html b/owl/Owl_optimise/Make_Embedded/Utils/index.html deleted file mode 100644 index 20ae98b45..000000000 --- a/owl/Owl_optimise/Make_Embedded/Utils/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Utils (owl.Owl_optimise.Make_Embedded.Utils)

Module Make_Embedded.Utils

val sample_num : Algodiff.t -> int
val draw_samples : Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t
val get_chunk : Algodiff.t -> Algodiff.t -> int -> int -> Algodiff.t * Algodiff.t
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/argument-1-A/Linalg/index.html b/owl/Owl_optimise/Make_Embedded/argument-1-A/Linalg/index.html deleted file mode 100644 index 6b2b74ec6..000000000 --- a/owl/Owl_optimise/Make_Embedded/argument-1-A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl.Owl_optimise.Make_Embedded.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/argument-1-A/Mat/index.html b/owl/Owl_optimise/Make_Embedded/argument-1-A/Mat/index.html deleted file mode 100644 index 52b473848..000000000 --- a/owl/Owl_optimise/Make_Embedded/argument-1-A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_optimise.Make_Embedded.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/argument-1-A/Scalar/index.html b/owl/Owl_optimise/Make_Embedded/argument-1-A/Scalar/index.html deleted file mode 100644 index e41642ddd..000000000 --- a/owl/Owl_optimise/Make_Embedded/argument-1-A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl.Owl_optimise.Make_Embedded.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/argument-1-A/index.html b/owl/Owl_optimise/Make_Embedded/argument-1-A/index.html deleted file mode 100644 index 29b0f0f9a..000000000 --- a/owl/Owl_optimise/Make_Embedded/argument-1-A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl.Owl_optimise.Make_Embedded.A)

Parameter Make_Embedded.A

include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl/Owl_optimise/Make_Embedded/index.html b/owl/Owl_optimise/Make_Embedded/index.html deleted file mode 100644 index b0ff53b5f..000000000 --- a/owl/Owl_optimise/Make_Embedded/index.html +++ /dev/null @@ -1,31 +0,0 @@ - -Make_Embedded (owl.Owl_optimise.Make_Embedded)

Module Owl_optimise.Make_Embedded

Parameters

Signature

include sig ... end
module Algodiff : sig ... end
module Utils : sig ... end
module Learning_Rate : sig ... end
module Batch : sig ... end
module Loss : sig ... end
module Gradient : sig ... end
module Momentum : sig ... end
module Regularisation : sig ... end
module Clipping : sig ... end
module Stopping : sig ... end
module Checkpoint : sig ... end
module Params : sig ... end
val minimise_weight : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Checkpoint.state * Algodiff.t
val minimise_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t * Algodiff.t array array) -> (Algodiff.t -> Algodiff.t array array * Algodiff.t array array) -> (Algodiff.t array array -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state
val minimise_fun : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Checkpoint.state * Algodiff.t
val minimise_compiled_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> (unit -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Algodiff/A/Linalg/index.html b/owl/Owl_optimise/S/Algodiff/A/Linalg/index.html deleted file mode 100644 index 7c2cba6f8..000000000 --- a/owl/Owl_optimise/S/Algodiff/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl.Owl_optimise.S.Algodiff.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Algodiff/A/Mat/index.html b/owl/Owl_optimise/S/Algodiff/A/Mat/index.html deleted file mode 100644 index be8a81029..000000000 --- a/owl/Owl_optimise/S/Algodiff/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_optimise.S.Algodiff.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Algodiff/A/Scalar/index.html b/owl/Owl_optimise/S/Algodiff/A/Scalar/index.html deleted file mode 100644 index 565d9d113..000000000 --- a/owl/Owl_optimise/S/Algodiff/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl.Owl_optimise.S.Algodiff.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Algodiff/A/index.html b/owl/Owl_optimise/S/Algodiff/A/index.html deleted file mode 100644 index c1c38f57c..000000000 --- a/owl/Owl_optimise/S/Algodiff/A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl.Owl_optimise.S.Algodiff.A)

Module Algodiff.A

val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Algodiff/Arr/index.html b/owl/Owl_optimise/S/Algodiff/Arr/index.html deleted file mode 100644 index 9a37577bc..000000000 --- a/owl/Owl_optimise/S/Algodiff/Arr/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Arr (owl.Owl_optimise.S.Algodiff.Arr)

Module Algodiff.Arr

val empty : int array -> t
val zeros : int array -> t
val ones : int array -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int array -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int array -> t
val shape : t -> int array
val numel : t -> int
val reset : t -> unit
val reshape : t -> int array -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Algodiff/Builder/index.html b/owl/Owl_optimise/S/Algodiff/Builder/index.html deleted file mode 100644 index 5d1daa0e3..000000000 --- a/owl/Owl_optimise/S/Algodiff/Builder/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Builder (owl.Owl_optimise.S.Algodiff.Builder)

Module Algodiff.Builder

module type Siso = sig ... end
val build_siso : (module Siso) -> t -> t
module type Sipo = sig ... end
val build_sipo : (module Sipo) -> t -> t * t
module type Sito = sig ... end
val build_sito : (module Sito) -> t -> t * t * t
module type Siao = sig ... end
val build_siao : (module Siao) -> t -> t array
module type Piso = sig ... end
val build_piso : (module Piso) -> t -> t -> t
module type Aiso = sig ... end
val build_aiso : (module Aiso) -> t array -> t
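
Builder is the extension point for user-defined differentiable operators: each module type above bundles a forward rule with its forward-mode (df) and reverse-mode (dr) derivative rules. Below is a minimal sketch of a single-input single-output operator packaged against the Siso signature; the argument conventions for df (primal output, primal input, input tangent) and dr (input node, primal output, adjoint reference) are assumptions modelled on Owl's built-in operators, not something the signature itself pins down.

module AD = Owl_optimise.S.Algodiff

(* cube x = x ** 3, with derivative 3 * x ** 2, wired in through build_siso *)
let cube =
  AD.Builder.build_siso
    (module struct
      let label = "cube"
      let ff_f a = AD.F AD.A.Scalar.(mul a (mul a a))
      let ff_arr a = AD.Arr AD.A.(mul a (mul a a))
      (* forward mode: tangent of x^3 is 3 x^2 * dx *)
      let df _cp ap at = AD.Maths.(AD.pack_flt 3. * sqr ap * at)
      (* reverse mode: adjoint contribution is 3 x^2 * downstream adjoint *)
      let dr a _cp ca = AD.Maths.(!ca * AD.pack_flt 3. * sqr (AD.primal a))
    end : AD.Builder.Siso)

If that convention holds, cube (AD.pack_flt 2.) evaluates to F 8. and AD.diff cube (AD.pack_flt 2.) to F 12., which is a quick sanity check of the derivative rules.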
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Algodiff/Builder/module-type-Aiso/index.html b/owl/Owl_optimise/S/Algodiff/Builder/module-type-Aiso/index.html deleted file mode 100644 index bb9726f85..000000000 --- a/owl/Owl_optimise/S/Algodiff/Builder/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl.Owl_optimise.S.Algodiff.Builder.Aiso)

Module type Builder.Aiso

val label : string
val ff : t array -> t
val df : int list -> t -> t array -> t array -> t
val dr : int list -> t array -> t -> t Stdlib.ref -> t list
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Algodiff/Builder/module-type-Piso/index.html b/owl/Owl_optimise/S/Algodiff/Builder/module-type-Piso/index.html deleted file mode 100644 index 5a59e4f05..000000000 --- a/owl/Owl_optimise/S/Algodiff/Builder/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl.Owl_optimise.S.Algodiff.Builder.Piso)

Module type Builder.Piso

val label : string
val ff_aa : A.elt -> A.elt -> t
val ff_ab : A.elt -> A.arr -> t
val ff_ba : A.arr -> A.elt -> t
val ff_bb : A.arr -> A.arr -> t
val df_da : t -> t -> t -> t -> t
val df_db : t -> t -> t -> t -> t
val df_dab : t -> t -> t -> t -> t -> t
val dr_ab : t -> t -> t -> t Stdlib.ref -> t * t
val dr_a : t -> t -> t -> t Stdlib.ref -> t
val dr_b : t -> t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Algodiff/Builder/module-type-Siao/index.html b/owl/Owl_optimise/S/Algodiff/Builder/module-type-Siao/index.html deleted file mode 100644 index aa99d9c40..000000000 --- a/owl/Owl_optimise/S/Algodiff/Builder/module-type-Siao/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siao (owl.Owl_optimise.S.Algodiff.Builder.Siao)

Module type Builder.Siao

val label : string
val ff_f : A.elt -> t array
val ff_arr : A.arr -> t array
val df : t array -> t -> t -> t array
val dr : t -> t -> t Stdlib.ref array -> t Stdlib.ref array -> t
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Algodiff/Builder/module-type-Sipo/index.html b/owl/Owl_optimise/S/Algodiff/Builder/module-type-Sipo/index.html deleted file mode 100644 index 010e325a9..000000000 --- a/owl/Owl_optimise/S/Algodiff/Builder/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl.Owl_optimise.S.Algodiff.Builder.Sipo)

Module type Builder.Sipo

val label : string
val ff_f : A.elt -> t * t
val ff_arr : A.arr -> t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Algodiff/Builder/module-type-Siso/index.html b/owl/Owl_optimise/S/Algodiff/Builder/module-type-Siso/index.html deleted file mode 100644 index ce3ed3370..000000000 --- a/owl/Owl_optimise/S/Algodiff/Builder/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl.Owl_optimise.S.Algodiff.Builder.Siso)

Module type Builder.Siso

val label : string
val ff_f : A.elt -> t
val ff_arr : A.arr -> t
val df : t -> t -> t -> t
val dr : t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Algodiff/Builder/module-type-Sito/index.html b/owl/Owl_optimise/S/Algodiff/Builder/module-type-Sito/index.html deleted file mode 100644 index d5b7f10a4..000000000 --- a/owl/Owl_optimise/S/Algodiff/Builder/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl.Owl_optimise.S.Algodiff.Builder.Sito)

Module type Builder.Sito

val label : string
val ff_f : A.elt -> t * t * t
val ff_arr : A.arr -> t * t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Algodiff/Linalg/index.html b/owl/Owl_optimise/S/Algodiff/Linalg/index.html deleted file mode 100644 index c66904f9a..000000000 --- a/owl/Owl_optimise/S/Algodiff/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl.Owl_optimise.S.Algodiff.Linalg)

Module Algodiff.Linalg

val inv : t -> t
val logdet : t -> t
val chol : ?upper:bool -> t -> t
val qr : t -> t * t
val lq : t -> t * t
val svd : ?thin:bool -> t -> t * t * t
val sylvester : t -> t -> t -> t
val lyapunov : t -> t -> t
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> t -> t -> t
val (/@) : t -> t -> t
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> t -> t -> t
val care : ?diag_r:bool -> t -> t -> t -> t -> t
val dare : ?diag_r:bool -> t -> t -> t -> t -> t
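
These are differentiable counterparts of the dense linear-algebra routines, so they can sit inside any function that is later passed to diff or grad. A small self-contained sketch (shapes chosen arbitrarily) that solves a random system and checks the residual:

module AD = Owl_optimise.S.Algodiff

let () =
  let a = AD.Mat.uniform 4 4 in
  let b = AD.Mat.uniform 4 1 in
  (* x solves a *@ x = b *)
  let x = AD.Linalg.linsolve a b in
  let residual = AD.Maths.(l2norm' ((a *@ x) - b)) in
  Printf.printf "residual = %g\n" (AD.unpack_flt residual)

Because linsolve participates in the AD graph, a scalar loss built from x could equally be handed to AD.grad.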
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Algodiff/Mat/index.html b/owl/Owl_optimise/S/Algodiff/Mat/index.html deleted file mode 100644 index 5808310c1..000000000 --- a/owl/Owl_optimise/S/Algodiff/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_optimise.S.Algodiff.Mat)

Module Algodiff.Mat

val empty : int -> int -> t
val zeros : int -> int -> t
val eye : int -> t
val ones : int -> int -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int -> int -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int -> int -> t
val shape : t -> int * int
val numel : t -> int
val row_num : t -> int
val col_num : t -> int
val reset : t -> unit
val reshape : int -> int -> t -> t
val get : t -> int -> int -> t
val set : t -> int -> int -> t -> t
val row : t -> int -> t
val mean : t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
val map_by_row : (t -> t) -> t -> t
val of_arrays : A.elt array array -> t
val init_2d : int -> int -> (int -> int -> t) -> t
val print : t -> unit
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Algodiff/Maths/index.html b/owl/Owl_optimise/S/Algodiff/Maths/index.html deleted file mode 100644 index b70f57564..000000000 --- a/owl/Owl_optimise/S/Algodiff/Maths/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Maths (owl.Owl_optimise.S.Algodiff.Maths)

Module Algodiff.Maths

val (+) : t -> t -> t
val (-) : t -> t -> t
val (*) : t -> t -> t
val (/) : t -> t -> t
val (*@) : t -> t -> t
val (**) : t -> t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val kron : t -> t -> t
val dot : t -> t -> t
val pow : t -> t -> t
val atan2 : t -> t -> t
val min2 : t -> t -> t
val max2 : t -> t -> t
val cross_entropy : t -> t -> t
val inv : t -> t
val neg : t -> t
val abs : t -> t
val signum : t -> t
val floor : t -> t
val ceil : t -> t
val round : t -> t
val sqr : t -> t
val sqrt : t -> t
val log : t -> t
val log2 : t -> t
val log10 : t -> t
val exp : t -> t
val sin : t -> t
val cos : t -> t
val tan : t -> t
val sinh : t -> t
val cosh : t -> t
val tanh : t -> t
val asin : t -> t
val acos : t -> t
val atan : t -> t
val asinh : t -> t
val acosh : t -> t
val atanh : t -> t
val sum' : t -> t
val log_sum_exp' : t -> t
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> t -> t
val sum : ?axis:int -> ?keep_dims:bool -> t -> t
val sum_reduce : ?axis:int array -> t -> t
val mean : t -> t
val transpose : ?axis:int array -> t -> t
val swap : int -> int -> t -> t
val l1norm' : t -> t
val l2norm' : t -> t
val l2norm_sqr' : t -> t
val sigmoid : t -> t
val relu : t -> t
val dawsn : t -> t
val softplus : t -> t
val softsign : t -> t
val softmax : ?axis:int -> t -> t
val reshape : t -> int array -> t
val flatten : t -> t
val get_item : t -> int -> int -> t
val get_row : t -> int -> t
val concat : axis:int -> t -> t -> t
val split : axis:int -> int array -> t -> t array
val of_arrays : t array array -> t
val to_arrays : t -> t array array
val concatenate : axis:int -> t array -> t
val stack : axis:int -> t array -> t
val get_slice : int list list -> t -> t
val set_slice : int list list -> t -> t -> t
val get_fancy : Owl_types.index list -> t -> t
val set_fancy : Owl_types.index list -> t -> t -> t
val diag : ?k:int -> t -> t
val diagm : ?k:int -> t -> t
val trace : t -> t
val triu : ?k:int -> t -> t
val tril : ?k:int -> t -> t
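
Every Maths function takes and returns Algodiff.t values (F scalars or Arr tensors), so a numerical expression written once with these operators can be evaluated directly and differentiated later without change. A minimal sketch of plain evaluation:

module AD = Owl_optimise.S.Algodiff

(* f x = sin x * exp (-x^2), expressed purely with Maths operators *)
let f x = AD.Maths.(sin x * exp (neg (sqr x)))

let () =
  let y = f (AD.pack_flt 1.5) in
  Printf.printf "f 1.5 = %g\n" (AD.unpack_flt y)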
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Algodiff/NN/index.html b/owl/Owl_optimise/S/Algodiff/NN/index.html deleted file mode 100644 index 178bc51c7..000000000 --- a/owl/Owl_optimise/S/Algodiff/NN/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -NN (owl.Owl_optimise.S.Algodiff.NN)

Module Algodiff.NN

val dropout : ?rate:float -> t -> t
val conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val dilated_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val dilated_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val dilated_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val transpose_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val transpose_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val transpose_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val max_pool1d : Owl_types.padding -> t -> int array -> int array -> t
val max_pool2d : Owl_types.padding -> t -> int array -> int array -> t
val max_pool3d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool1d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool2d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool3d : Owl_types.padding -> t -> int array -> int array -> t
val upsampling2d : t -> int array -> t
val pad : ?v:A.elt -> int list list -> t -> t
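
The NN functions mirror the ndarray-level convolution and pooling primitives but act on Algodiff.t, so gradients propagate through them during training. The sketch below applies one conv2d; the SAME padding constructor, the [|batch; height; width; channels|] input layout and the [|kh; kw; in_channels; out_channels|] kernel layout are assumptions based on Owl's usual conventions rather than anything stated in this signature.

module AD = Owl_optimise.S.Algodiff

let () =
  let x = AD.pack_arr (AD.A.uniform [| 1; 28; 28; 1 |]) in  (* one 28x28 single-channel image *)
  let k = AD.pack_arr (AD.A.uniform [| 3; 3; 1; 8 |]) in    (* eight 3x3 filters *)
  let y = AD.NN.conv2d ~padding:Owl_types.SAME x k [| 1; 1 |] in
  Array.iter (Printf.printf "%d ") (AD.shape y);            (* expected: 1 28 28 8 *)
  print_newline ()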
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Algodiff/index.html b/owl/Owl_optimise/S/Algodiff/index.html deleted file mode 100644 index 5c91c0f20..000000000 --- a/owl/Owl_optimise/S/Algodiff/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Algodiff (owl.Owl_optimise.S.Algodiff)

Module S.Algodiff

module A : sig ... end
type t = Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.S).t =
  1. | F of A.elt
  2. | Arr of A.arr
  3. | DF of t * t * int
  4. | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
val tag : unit -> int
val primal : t -> t
val primal' : t -> t
val zero : t -> t
val reset_zero : t -> t
val tangent : t -> t
val adjref : t -> t Stdlib.ref
val adjval : t -> t
val shape : t -> int array
val is_float : t -> bool
val is_arr : t -> bool
val row_num : t -> int
val col_num : t -> int
val numel : t -> int
val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t
val clip_by_l2norm : A.elt -> t -> t
val copy_primal' : t -> t
val tile : t -> int array -> t
val repeat : t -> int array -> t
val pack_elt : A.elt -> t
val unpack_elt : t -> A.elt
val pack_flt : float -> t
val _f : float -> t
val unpack_flt : t -> float
val pack_arr : A.arr -> t
val unpack_arr : t -> A.arr
val deep_info : t -> string
val type_info : t -> string
val error_binop : string -> t -> t -> 'a
val error_uniop : string -> t -> 'a
val make_forward : t -> t -> int -> t
val make_reverse : t -> int -> t
val reverse_prop : t -> t -> unit
val diff : (t -> t) -> t -> t
val diff' : (t -> t) -> t -> t * t
val grad : (t -> t) -> t -> t
val grad' : (t -> t) -> t -> t * t
val jacobian : (t -> t) -> t -> t
val jacobian' : (t -> t) -> t -> t * t
val jacobianv : (t -> t) -> t -> t -> t
val jacobianv' : (t -> t) -> t -> t -> t * t
val jacobianTv : (t -> t) -> t -> t -> t
val jacobianTv' : (t -> t) -> t -> t -> t * t
val hessian : (t -> t) -> t -> t
val hessian' : (t -> t) -> t -> t * t
val hessianv : (t -> t) -> t -> t -> t
val hessianv' : (t -> t) -> t -> t -> t * t
val laplacian : (t -> t) -> t -> t
val laplacian' : (t -> t) -> t -> t * t
val gradhessian : (t -> t) -> t -> t * t
val gradhessian' : (t -> t) -> t -> t * t * t
val gradhessianv : (t -> t) -> t -> t -> t * t
val gradhessianv' : (t -> t) -> t -> t -> t * t * t
module Builder : sig ... end
module Maths : sig ... end
module Linalg : sig ... end
module NN : sig ... end
module Mat : sig ... end
module Arr : sig ... end
val to_trace : t list -> string
val to_dot : t list -> string
val pp_num : Stdlib.Format.formatter -> t -> unit
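
The higher-order functions in this module (diff, grad, jacobian, hessian and friends) are the main entry points: each takes an ordinary OCaml function over Algodiff.t and returns its derivative as another function. A minimal sketch:

module AD = Owl_optimise.S.Algodiff

(* scalar function and its first derivative *)
let f x = AD.Maths.(tanh (sqr x))
let f' = AD.diff f

(* scalar-valued function of a matrix and its gradient *)
let g w = AD.Maths.(l2norm_sqr' w + sum' (sin w))
let g' = AD.grad g

let () =
  Printf.printf "f' 0.5 = %g\n" (AD.unpack_flt (f' (AD.pack_flt 0.5)));
  let w = AD.Mat.uniform 3 3 in
  AD.Mat.print (g' w)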
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Batch/index.html b/owl/Owl_optimise/S/Batch/index.html deleted file mode 100644 index cdfb7c279..000000000 --- a/owl/Owl_optimise/S/Batch/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Batch (owl.Owl_optimise.S.Batch)

Module S.Batch

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.S)).Batch.typ =
  1. | Full
  2. | Mini of int
  3. | Sample of int
  4. | Stochastic
val run : typ -> Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t
val batches : typ -> Algodiff.t -> int
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Checkpoint/index.html b/owl/Owl_optimise/S/Checkpoint/index.html deleted file mode 100644 index e64359ff1..000000000 --- a/owl/Owl_optimise/S/Checkpoint/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Checkpoint (owl.Owl_optimise.S.Checkpoint)

Module S.Checkpoint

type state = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.S)).Checkpoint.state = {
  1. mutable current_batch : int;
  2. mutable batches_per_epoch : int;
  3. mutable epochs : float;
  4. mutable batches : int;
  5. mutable loss : Algodiff.t array;
  6. mutable start_at : float;
  7. mutable stop : bool;
  8. mutable gs : Algodiff.t array array;
  9. mutable ps : Algodiff.t array array;
  10. mutable us : Algodiff.t array array;
  11. mutable ch : Algodiff.t array array array;
}
type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.S)).Checkpoint.typ =
  1. | Batch of int
  2. | Epoch of float
  3. | Custom of state -> unit
  4. | None
val init_state : int -> float -> state
val default_checkpoint_fun : (string -> 'a) -> 'a
val print_state_info : state -> unit
val print_summary : state -> unit
val run : typ -> (string -> unit) -> int -> Algodiff.t -> state -> unit
val to_string : typ -> string
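
Checkpoint decides when the optimiser pauses to report or snapshot progress: at a fixed Batch or Epoch interval, never (None), or through a Custom callback over the mutable state record above. A small illustrative sketch, assuming the callback is invoked once per processed batch:

module O = Owl_optimise.S

(* report roughly every 100 minibatches using the state fields listed above *)
let chk =
  O.Checkpoint.Custom
    (fun state ->
       if state.O.Checkpoint.current_batch mod 100 = 0
       then O.Checkpoint.print_state_info state)

The resulting value is then passed to Params.config via ~checkpoint:chk.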
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Clipping/index.html b/owl/Owl_optimise/S/Clipping/index.html deleted file mode 100644 index 2a8533ba2..000000000 --- a/owl/Owl_optimise/S/Clipping/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Clipping (owl.Owl_optimise.S.Clipping)

Module S.Clipping

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.S)).Clipping.typ =
  1. | L2norm of float
  2. | Value of float * float
  3. | None
val run : typ -> Algodiff.t -> Algodiff.t
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Gradient/index.html b/owl/Owl_optimise/S/Gradient/index.html deleted file mode 100644 index 7670ad8ab..000000000 --- a/owl/Owl_optimise/S/Gradient/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -Gradient (owl.Owl_optimise.S.Gradient)

Module S.Gradient

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.S)).Gradient.typ =
  1. | GD
  2. | CG
  3. | CD
  4. | NonlinearCG
  5. | DaiYuanCG
  6. | NewtonCG
  7. | Newton
val run : typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Learning_Rate/index.html b/owl/Owl_optimise/S/Learning_Rate/index.html deleted file mode 100644 index 825b5e1a2..000000000 --- a/owl/Owl_optimise/S/Learning_Rate/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Learning_Rate (owl.Owl_optimise.S.Learning_Rate)

Module S.Learning_Rate

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.S)).Learning_Rate.typ =
  1. | Adagrad of float
  2. | Const of float
  3. | Decay of float * float
  4. | Exp_decay of float * float
  5. | RMSprop of float * float
  6. | Adam of float * float * float
  7. | Schedule of float array
val run : typ -> int -> Algodiff.t -> Algodiff.t array -> Algodiff.t
val default : typ -> typ
val update_ch : typ -> Algodiff.t -> Algodiff.t array -> Algodiff.t array
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Loss/index.html b/owl/Owl_optimise/S/Loss/index.html deleted file mode 100644 index 407c6c761..000000000 --- a/owl/Owl_optimise/S/Loss/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Loss (owl.Owl_optimise.S.Loss)

Module S.Loss

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.S)).Loss.typ =
  1. | Hinge
  2. | L1norm
  3. | L2norm
  4. | Quadratic
  5. | Cross_entropy
  6. | Custom of Algodiff.t -> Algodiff.t -> Algodiff.t
val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
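
Besides the built-in variants, Custom accepts any Algodiff.t -> Algodiff.t -> Algodiff.t function of the target and the prediction. A sketch of a hand-rolled mean-absolute-error loss; the name mae and its formulation are illustrative only:

module O = Owl_optimise.S
module AD = O.Algodiff

(* mean absolute error between target y and prediction y' *)
let mae y y' =
  let n = AD.pack_flt (float_of_int (AD.numel y)) in
  AD.Maths.(sum' (abs (y - y')) / n)

let () =
  let loss = O.Loss.Custom mae in
  let l = O.Loss.run loss (AD.Mat.uniform 10 1) (AD.Mat.uniform 10 1) in
  Printf.printf "custom loss = %g\n" (AD.unpack_flt l)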
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Momentum/index.html b/owl/Owl_optimise/S/Momentum/index.html deleted file mode 100644 index c8578b30b..000000000 --- a/owl/Owl_optimise/S/Momentum/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Momentum (owl.Owl_optimise.S.Momentum)

Module S.Momentum

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.S)).Momentum.typ =
  1. | Standard of float
  2. | Nesterov of float
  3. | None
val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Params/index.html b/owl/Owl_optimise/S/Params/index.html deleted file mode 100644 index 473067cbf..000000000 --- a/owl/Owl_optimise/S/Params/index.html +++ /dev/null @@ -1,16 +0,0 @@ - -Params (owl.Owl_optimise.S.Params)

Module S.Params

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.S)).Params.typ = {
  1. mutable epochs : float;
  2. mutable batch : Batch.typ;
  3. mutable gradient : Gradient.typ;
  4. mutable loss : Loss.typ;
  5. mutable learning_rate : Learning_Rate.typ;
  6. mutable regularisation : Regularisation.typ;
  7. mutable momentum : Momentum.typ;
  8. mutable clipping : Clipping.typ;
  9. mutable stopping : Stopping.typ;
  10. mutable checkpoint : Checkpoint.typ;
  11. mutable verbosity : bool;
}
val default : unit -> typ
val config : ?batch:Batch.typ -> ?gradient:Gradient.typ -> ?loss:Loss.typ -> ?learning_rate:Learning_Rate.typ -> ?regularisation:Regularisation.typ -> ?momentum:Momentum.typ -> ?clipping:Clipping.typ -> ?stopping:Stopping.typ -> ?checkpoint:Checkpoint.typ -> ?verbosity:bool -> float -> typ
val to_string : typ -> string
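
Params.config assembles a typ record from the optional arguments above plus a mandatory trailing float, which corresponds to the epochs field of the record. A sketch of a typical configuration; the particular hyperparameter values are arbitrary:

module O = Owl_optimise.S

let params =
  O.Params.config
    ~batch:(O.Batch.Mini 128)
    ~gradient:O.Gradient.GD
    ~loss:O.Loss.Cross_entropy
    ~learning_rate:(O.Learning_Rate.Adam (0.001, 0.9, 0.999))
    ~regularisation:(O.Regularisation.L2norm 1e-4)
    ~momentum:(O.Momentum.Standard 0.9)
    ~checkpoint:(O.Checkpoint.Epoch 1.)
    ~verbosity:true
    10.                         (* the trailing float: number of epochs *)

let () = print_endline (O.Params.to_string params)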
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Regularisation/index.html b/owl/Owl_optimise/S/Regularisation/index.html deleted file mode 100644 index 620f38d16..000000000 --- a/owl/Owl_optimise/S/Regularisation/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Regularisation (owl.Owl_optimise.S.Regularisation)

Module S.Regularisation

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.S)).Regularisation.typ =
  1. | L1norm of float
  2. | L2norm of float
  3. | Elastic_net of float * float
  4. | None
val run : typ -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Stopping/index.html b/owl/Owl_optimise/S/Stopping/index.html deleted file mode 100644 index ef760b970..000000000 --- a/owl/Owl_optimise/S/Stopping/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Stopping (owl.Owl_optimise.S.Stopping)

Module S.Stopping

val run : typ -> float -> bool
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_optimise/S/Utils/index.html b/owl/Owl_optimise/S/Utils/index.html deleted file mode 100644 index da0313c75..000000000 --- a/owl/Owl_optimise/S/Utils/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Utils (owl.Owl_optimise.S.Utils)

Module S.Utils

val sample_num : Algodiff.t -> int
val draw_samples : Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t
val get_chunk : Algodiff.t -> Algodiff.t -> int -> int -> Algodiff.t * Algodiff.t
\ No newline at end of file diff --git a/owl/Owl_optimise/S/index.html b/owl/Owl_optimise/S/index.html deleted file mode 100644 index ef8a793dc..000000000 --- a/owl/Owl_optimise/S/index.html +++ /dev/null @@ -1,31 +0,0 @@ - -S (owl.Owl_optimise.S)

Module Owl_optimise.S

module Algodiff : sig ... end
module Utils : sig ... end
module Learning_Rate : sig ... end
module Batch : sig ... end
module Loss : sig ... end
module Gradient : sig ... end
module Momentum : sig ... end
module Regularisation : sig ... end
module Clipping : sig ... end
module Stopping : sig ... end
module Checkpoint : sig ... end
module Params : sig ... end
val minimise_weight : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Checkpoint.state * Algodiff.t
val minimise_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t * Algodiff.t array array) -> (Algodiff.t -> Algodiff.t array array * Algodiff.t array array) -> (Algodiff.t array array -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state
val minimise_fun : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Checkpoint.state * Algodiff.t
val minimise_compiled_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> (unit -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state
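
Tying the pieces together, minimise_fun iteratively minimises a plain Algodiff.t -> Algodiff.t objective under a Params configuration and returns the final Checkpoint.state together with the optimised argument. A minimal sketch on a toy quadratic; the hyperparameters are arbitrary and it is assumed here that a scalar (F) starting point is acceptable:

module O = Owl_optimise.S
module AD = O.Algodiff

(* objective: (w - 3)^2, minimised at w = 3 *)
let f w = AD.Maths.(sqr (w - AD.pack_flt 3.))

let () =
  let params =
    O.Params.config
      ~gradient:O.Gradient.GD
      ~learning_rate:(O.Learning_Rate.Const 0.1)
      ~verbosity:false
      50.
  in
  let _state, w_star = O.minimise_fun params f (AD.pack_flt 0.) in
  Printf.printf "w* = %g\n" (AD.unpack_flt (AD.primal' w_star))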
\ No newline at end of file diff --git a/owl/Owl_regression/.dummy b/owl/Owl_regression/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_regression/D/Optimise/Algodiff/A/Linalg/index.html b/owl/Owl_regression/D/Optimise/Algodiff/A/Linalg/index.html deleted file mode 100644 index 0ec7ce034..000000000 --- a/owl/Owl_regression/D/Optimise/Algodiff/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl.Owl_regression.D.Optimise.Algodiff.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/Algodiff/A/Mat/index.html b/owl/Owl_regression/D/Optimise/Algodiff/A/Mat/index.html deleted file mode 100644 index 2cd811684..000000000 --- a/owl/Owl_regression/D/Optimise/Algodiff/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_regression.D.Optimise.Algodiff.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/Algodiff/A/Scalar/index.html b/owl/Owl_regression/D/Optimise/Algodiff/A/Scalar/index.html deleted file mode 100644 index 6bdb3cce1..000000000 --- a/owl/Owl_regression/D/Optimise/Algodiff/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl.Owl_regression.D.Optimise.Algodiff.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/Algodiff/A/index.html b/owl/Owl_regression/D/Optimise/Algodiff/A/index.html deleted file mode 100644 index 65c308385..000000000 --- a/owl/Owl_regression/D/Optimise/Algodiff/A/index.html +++ /dev/null @@ -1,160 +0,0 @@ - -A (owl.Owl_regression.D.Optimise.Algodiff.A)

Module Algodiff.A

val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
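The signature above is the plain ndarray layer that Algodiff differentiates through. A minimal usage sketch follows; it assumes Owl_algodiff_primal_ops.D (the double-precision backend named in the Algodiff type equation later in this diff) as a concrete instance and that elt = float for it. Both are assumptions made for illustration, not statements from this page.

(* Usage sketch; assumption: Owl_algodiff_primal_ops.D implements this signature with elt = float. *)
module A = Owl_algodiff_primal_ops.D

let () =
  (* a 2x3 array filled with 1., 2., ..., 6. *)
  let x = A.sequential ~a:(A.float_to_elt 1.) [| 2; 3 |] in
  (* sums along axis 1 (per-row sums) *)
  let s = A.sum ~axis:1 x in
  A.print s;
  Printf.printf "total = %g\n" (A.elt_to_float (A.sum' x))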
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/Algodiff/Arr/index.html b/owl/Owl_regression/D/Optimise/Algodiff/Arr/index.html deleted file mode 100644 index b7f2a6ebd..000000000 --- a/owl/Owl_regression/D/Optimise/Algodiff/Arr/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Arr (owl.Owl_regression.D.Optimise.Algodiff.Arr)

Module Algodiff.Arr

val empty : int array -> t
val zeros : int array -> t
val ones : int array -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int array -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int array -> t
val shape : t -> int array
val numel : t -> int
val reset : t -> unit
val reshape : t -> int array -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
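This Arr sub-module lifts whole ndarrays into Algodiff values (the Arr constructor of t), so the few operations listed above stay inside t and can be mixed freely with differentiated code. A small sketch, assuming Owl.Algodiff.D exposes the same interface as this instance:

(* Sketch only; Owl.Algodiff.D is assumed to match the signature above. *)
module AD = Owl.Algodiff.D

let () =
  let x = AD.Arr.ones [| 2; 3 |] in
  let y = AD.Arr.uniform [| 2; 3 |] in
  let z = AD.Arr.add x y in
  Printf.printf "numel z = %d\n" (AD.Arr.numel z)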
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/Algodiff/Builder/index.html b/owl/Owl_regression/D/Optimise/Algodiff/Builder/index.html deleted file mode 100644 index 2d64d529d..000000000 --- a/owl/Owl_regression/D/Optimise/Algodiff/Builder/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Builder (owl.Owl_regression.D.Optimise.Algodiff.Builder)

Module Algodiff.Builder

module type Siso = sig ... end
val build_siso : (module Siso) -> t -> t
module type Sipo = sig ... end
val build_sipo : (module Sipo) -> t -> t * t
module type Sito = sig ... end
val build_sito : (module Sito) -> t -> t * t * t
module type Siao = sig ... end
val build_siao : (module Siao) -> t -> t array
module type Piso = sig ... end
val build_piso : (module Piso) -> t -> t -> t
module type Aiso = sig ... end
val build_aiso : (module Aiso) -> t array -> t
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/Algodiff/Builder/module-type-Aiso/index.html b/owl/Owl_regression/D/Optimise/Algodiff/Builder/module-type-Aiso/index.html deleted file mode 100644 index 040ccbecd..000000000 --- a/owl/Owl_regression/D/Optimise/Algodiff/Builder/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl.Owl_regression.D.Optimise.Algodiff.Builder.Aiso)

Module type Builder.Aiso

val label : string
val ff : t array -> t
val df : int list -> t -> t array -> t array -> t
val dr : int list -> t array -> t -> t Stdlib.ref -> t list
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/Algodiff/Builder/module-type-Piso/index.html b/owl/Owl_regression/D/Optimise/Algodiff/Builder/module-type-Piso/index.html deleted file mode 100644 index 0b5567aca..000000000 --- a/owl/Owl_regression/D/Optimise/Algodiff/Builder/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl.Owl_regression.D.Optimise.Algodiff.Builder.Piso)

Module type Builder.Piso

val label : string
val ff_aa : A.elt -> A.elt -> t
val ff_ab : A.elt -> A.arr -> t
val ff_ba : A.arr -> A.elt -> t
val ff_bb : A.arr -> A.arr -> t
val df_da : t -> t -> t -> t -> t
val df_db : t -> t -> t -> t -> t
val df_dab : t -> t -> t -> t -> t -> t
val dr_ab : t -> t -> t -> t Stdlib.ref -> t * t
val dr_a : t -> t -> t -> t Stdlib.ref -> t
val dr_b : t -> t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/Algodiff/Builder/module-type-Siao/index.html b/owl/Owl_regression/D/Optimise/Algodiff/Builder/module-type-Siao/index.html deleted file mode 100644 index f5d51217e..000000000 --- a/owl/Owl_regression/D/Optimise/Algodiff/Builder/module-type-Siao/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siao (owl.Owl_regression.D.Optimise.Algodiff.Builder.Siao)

Module type Builder.Siao

val label : string
val ff_f : A.elt -> t array
val ff_arr : A.arr -> t array
val df : t array -> t -> t -> t array
val dr : t -> t -> t Stdlib.ref array -> t Stdlib.ref array -> t
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/Algodiff/Builder/module-type-Sipo/index.html b/owl/Owl_regression/D/Optimise/Algodiff/Builder/module-type-Sipo/index.html deleted file mode 100644 index 1125d0f5c..000000000 --- a/owl/Owl_regression/D/Optimise/Algodiff/Builder/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl.Owl_regression.D.Optimise.Algodiff.Builder.Sipo)

Module type Builder.Sipo

val label : string
val ff_f : A.elt -> t * t
val ff_arr : A.arr -> t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/Algodiff/Builder/module-type-Siso/index.html b/owl/Owl_regression/D/Optimise/Algodiff/Builder/module-type-Siso/index.html deleted file mode 100644 index dc97480ac..000000000 --- a/owl/Owl_regression/D/Optimise/Algodiff/Builder/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl.Owl_regression.D.Optimise.Algodiff.Builder.Siso)

Module type Builder.Siso

val label : string
val ff_f : A.elt -> t
val ff_arr : A.arr -> t
val df : t -> t -> t -> t
val dr : t -> t -> t Stdlib.ref -> t
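Siso describes a single-input single-output primitive: ff_f and ff_arr give the forward pass on raw scalars and arrays, df the tangent (forward-mode) rule and dr the adjoint (reverse-mode) rule; build_siso then turns such a module into an operator on t. Below is a hedged sketch of a custom cube operator. It assumes Owl.Algodiff.D matches this signature and that df receives (cp, ap, at) and dr receives (a, cp, ca) in that order; the argument order is an assumption modelled on Owl's built-in operators, not stated on this page.

(* Hypothetical custom op f(x) = x^3 built with Builder.build_siso. *)
module AD = Owl.Algodiff.D

let cube : AD.t -> AD.t =
  let open AD in
  Builder.build_siso
    (module struct
      let label = "cube"
      (* forward pass on a raw scalar / raw ndarray *)
      let ff_f x = F A.Scalar.(mul x (mul x x))
      let ff_arr x = Arr A.(mul x (mul x x))
      (* forward-mode: d(x^3) = 3 x^2 dx *)
      let df _cp ap at = Maths.(_f 3. * sqr ap * at)
      (* reverse-mode: incoming adjoint times 3 x^2 *)
      let dr a _cp ca = Maths.(!ca * (_f 3. * sqr (primal a)))
    end : Builder.Siso)

let () =
  (* d/dx x^3 at x = 2 is 12 *)
  Printf.printf "%g\n" AD.(unpack_flt (diff cube (pack_flt 2.)))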
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/Algodiff/Builder/module-type-Sito/index.html b/owl/Owl_regression/D/Optimise/Algodiff/Builder/module-type-Sito/index.html deleted file mode 100644 index 67e446a55..000000000 --- a/owl/Owl_regression/D/Optimise/Algodiff/Builder/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl.Owl_regression.D.Optimise.Algodiff.Builder.Sito)

Module type Builder.Sito

val label : string
val ff_f : A.elt -> t * t * t
val ff_arr : A.arr -> t * t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/Algodiff/Linalg/index.html b/owl/Owl_regression/D/Optimise/Algodiff/Linalg/index.html deleted file mode 100644 index 59b3f35a1..000000000 --- a/owl/Owl_regression/D/Optimise/Algodiff/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl.Owl_regression.D.Optimise.Algodiff.Linalg)

Module Algodiff.Linalg

val inv : t -> t
val logdet : t -> t
val chol : ?upper:bool -> t -> t
val qr : t -> t * t
val lq : t -> t * t
val svd : ?thin:bool -> t -> t * t * t
val sylvester : t -> t -> t -> t
val lyapunov : t -> t -> t
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> t -> t -> t
val (/@) : t -> t -> t
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> t -> t -> t
val care : ?diag_r:bool -> t -> t -> t -> t -> t
val dare : ?diag_r:bool -> t -> t -> t -> t -> t
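These routines work directly on t, so solves and factorisations remain differentiable. A small sketch of linsolve, again assuming Owl.Algodiff.D as a concrete instance of this signature:

(* Sketch: solve A x = b and check the residual. *)
module AD = Owl.Algodiff.D
open AD

let () =
  let a = Maths.(Mat.eye 3 + _f 0.1 * Mat.uniform 3 3) in
  let b = Mat.uniform 3 1 in
  let x = Linalg.linsolve a b in
  (* residual should be numerically close to zero *)
  Mat.print Maths.(a *@ x - b)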
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/Algodiff/Mat/index.html b/owl/Owl_regression/D/Optimise/Algodiff/Mat/index.html deleted file mode 100644 index 5a2d633be..000000000 --- a/owl/Owl_regression/D/Optimise/Algodiff/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_regression.D.Optimise.Algodiff.Mat)

Module Algodiff.Mat

val empty : int -> int -> t
val zeros : int -> int -> t
val eye : int -> t
val ones : int -> int -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int -> int -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int -> int -> t
val shape : t -> int * int
val numel : t -> int
val row_num : t -> int
val col_num : t -> int
val reset : t -> unit
val reshape : int -> int -> t -> t
val get : t -> int -> int -> t
val set : t -> int -> int -> t -> t
val row : t -> int -> t
val mean : t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
val map_by_row : (t -> t) -> t -> t
val of_arrays : A.elt array array -> t
val init_2d : int -> int -> (int -> int -> t) -> t
val print : t -> unit
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/Algodiff/Maths/index.html b/owl/Owl_regression/D/Optimise/Algodiff/Maths/index.html deleted file mode 100644 index 210f10f81..000000000 --- a/owl/Owl_regression/D/Optimise/Algodiff/Maths/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Maths (owl.Owl_regression.D.Optimise.Algodiff.Maths)

Module Algodiff.Maths

val (+) : t -> t -> t
val (-) : t -> t -> t
val (*) : t -> t -> t
val (/) : t -> t -> t
val (*@) : t -> t -> t
val (**) : t -> t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val kron : t -> t -> t
val dot : t -> t -> t
val pow : t -> t -> t
val atan2 : t -> t -> t
val min2 : t -> t -> t
val max2 : t -> t -> t
val cross_entropy : t -> t -> t
val inv : t -> t
val neg : t -> t
val abs : t -> t
val signum : t -> t
val floor : t -> t
val ceil : t -> t
val round : t -> t
val sqr : t -> t
val sqrt : t -> t
val log : t -> t
val log2 : t -> t
val log10 : t -> t
val exp : t -> t
val sin : t -> t
val cos : t -> t
val tan : t -> t
val sinh : t -> t
val cosh : t -> t
val tanh : t -> t
val asin : t -> t
val acos : t -> t
val atan : t -> t
val asinh : t -> t
val acosh : t -> t
val atanh : t -> t
val sum' : t -> t
val log_sum_exp' : t -> t
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> t -> t
val sum : ?axis:int -> ?keep_dims:bool -> t -> t
val sum_reduce : ?axis:int array -> t -> t
val mean : t -> t
val transpose : ?axis:int array -> t -> t
val swap : int -> int -> t -> t
val l1norm' : t -> t
val l2norm' : t -> t
val l2norm_sqr' : t -> t
val sigmoid : t -> t
val relu : t -> t
val dawsn : t -> t
val softplus : t -> t
val softsign : t -> t
val softmax : ?axis:int -> t -> t
val reshape : t -> int array -> t
val flatten : t -> t
val get_item : t -> int -> int -> t
val get_row : t -> int -> t
val concat : axis:int -> t -> t -> t
val split : axis:int -> int array -> t -> t array
val of_arrays : t array array -> t
val to_arrays : t -> t array array
val concatenate : axis:int -> t array -> t
val stack : axis:int -> t array -> t
val get_slice : int list list -> t -> t
val set_slice : int list list -> t -> t -> t
val get_fancy : Owl_types.index list -> t -> t
val set_fancy : Owl_types.index list -> t -> t -> t
val diag : ?k:int -> t -> t
val diagm : ?k:int -> t -> t
val trace : t -> t
val triu : ?k:int -> t -> t
val tril : ?k:int -> t -> t
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/Algodiff/NN/index.html b/owl/Owl_regression/D/Optimise/Algodiff/NN/index.html deleted file mode 100644 index 9024f59e0..000000000 --- a/owl/Owl_regression/D/Optimise/Algodiff/NN/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -NN (owl.Owl_regression.D.Optimise.Algodiff.NN)

Module Algodiff.NN

val dropout : ?rate:float -> t -> t
val conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val dilated_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val dilated_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val dilated_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val transpose_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val transpose_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val transpose_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val max_pool1d : Owl_types.padding -> t -> int array -> int array -> t
val max_pool2d : Owl_types.padding -> t -> int array -> int array -> t
val max_pool3d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool1d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool2d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool3d : Owl_types.padding -> t -> int array -> int array -> t
val upsampling2d : t -> int array -> t
val pad : ?v:A.elt -> int list list -> t -> t
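NN mirrors the primal convolution and pooling kernels at the Algodiff level so they can sit inside differentiated code. The sketch below assumes Owl.Algodiff.D, an NHWC input layout ([|batch; h; w; channels|]) and kernels shaped [|kh; kw; in_c; out_c|]; these layout details are assumptions taken from Owl's usual conventions rather than from this page.

(* Shape sketch for conv2d followed by max_pool2d. *)
module AD = Owl.Algodiff.D
open AD

let () =
  let x = Arr.uniform [| 10; 28; 28; 1 |] in      (* 10 single-channel 28x28 inputs *)
  let k = Arr.uniform [| 3; 3; 1; 8 |] in         (* 3x3 kernel, 1 -> 8 channels *)
  let y = NN.conv2d ~padding:Owl_types.SAME x k [| 1; 1 |] in
  let z = NN.max_pool2d Owl_types.VALID y [| 2; 2 |] [| 2; 2 |] in
  Array.iter (Printf.printf "%d ") (shape z);     (* expect 10 14 14 8 *)
  print_newline ()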
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/Algodiff/index.html b/owl/Owl_regression/D/Optimise/Algodiff/index.html deleted file mode 100644 index 3901d63c4..000000000 --- a/owl/Owl_regression/D/Optimise/Algodiff/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Algodiff (owl.Owl_regression.D.Optimise.Algodiff)

Module Optimise.Algodiff

module A : sig ... end
type t = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.D)).Algodiff.t =
  1. | F of A.elt
  2. | Arr of A.arr
  3. | DF of t * t * int
  4. | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
val tag : unit -> int
val primal : t -> t
val primal' : t -> t
val zero : t -> t
val reset_zero : t -> t
val tangent : t -> t
val adjref : t -> t Stdlib.ref
val adjval : t -> t
val shape : t -> int array
val is_float : t -> bool
val is_arr : t -> bool
val row_num : t -> int
val col_num : t -> int
val numel : t -> int
val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t
val clip_by_l2norm : A.elt -> t -> t
val copy_primal' : t -> t
val tile : t -> int array -> t
val repeat : t -> int array -> t
val pack_elt : A.elt -> t
val unpack_elt : t -> A.elt
val pack_flt : float -> t
val _f : float -> t
val unpack_flt : t -> float
val pack_arr : A.arr -> t
val unpack_arr : t -> A.arr
val deep_info : t -> string
val type_info : t -> string
val error_binop : string -> t -> t -> 'a
val error_uniop : string -> t -> 'a
val make_forward : t -> t -> int -> t
val make_reverse : t -> int -> t
val reverse_prop : t -> t -> unit
val diff : (t -> t) -> t -> t
val diff' : (t -> t) -> t -> t * t
val grad : (t -> t) -> t -> t
val grad' : (t -> t) -> t -> t * t
val jacobian : (t -> t) -> t -> t
val jacobian' : (t -> t) -> t -> t * t
val jacobianv : (t -> t) -> t -> t -> t
val jacobianv' : (t -> t) -> t -> t -> t * t
val jacobianTv : (t -> t) -> t -> t -> t
val jacobianTv' : (t -> t) -> t -> t -> t * t
val hessian : (t -> t) -> t -> t
val hessian' : (t -> t) -> t -> t * t
val hessianv : (t -> t) -> t -> t -> t
val hessianv' : (t -> t) -> t -> t -> t * t
val laplacian : (t -> t) -> t -> t
val laplacian' : (t -> t) -> t -> t * t
val gradhessian : (t -> t) -> t -> t * t
val gradhessian' : (t -> t) -> t -> t * t * t
val gradhessianv : (t -> t) -> t -> t -> t * t
val gradhessianv' : (t -> t) -> t -> t -> t * t * t
module Builder : sig ... end
module Maths : sig ... end
module Linalg : sig ... end
module NN : sig ... end
module Mat : sig ... end
module Arr : sig ... end
val to_trace : t list -> string
val to_dot : t list -> string
val pp_num : Stdlib.Format.formatter -> t -> unit
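The diff/grad/jacobian/hessian family above turns an ordinary OCaml function over t into its derivative. A minimal sketch, assuming Owl.Algodiff.D exposes the same interface as this Optimise.Algodiff instance:

(* Derivatives of plain OCaml functions written against Maths. *)
module AD = Owl.Algodiff.D
open AD

let f x = Maths.(x * sin x)              (* scalar -> scalar *)
let g w = Maths.(sum' (sqr w))           (* matrix -> scalar *)

let () =
  (* f'(x) = sin x + x cos x *)
  Printf.printf "f'(1.0) = %g\n" (unpack_flt (diff f (pack_flt 1.0)));
  let w = Mat.uniform 1 3 in
  (* grad g w = 2 w *)
  Mat.print (grad g w)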
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/Batch/index.html b/owl/Owl_regression/D/Optimise/Batch/index.html deleted file mode 100644 index d0a2d13ea..000000000 --- a/owl/Owl_regression/D/Optimise/Batch/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Batch (owl.Owl_regression.D.Optimise.Batch)

Module Optimise.Batch

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.D)).Batch.typ =
  1. | Full
  2. | Mini of int
  3. | Sample of int
  4. | Stochastic
val run : typ -> Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t
val batches : typ -> Algodiff.t -> int
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/Checkpoint/index.html b/owl/Owl_regression/D/Optimise/Checkpoint/index.html deleted file mode 100644 index a01f6612a..000000000 --- a/owl/Owl_regression/D/Optimise/Checkpoint/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Checkpoint (owl.Owl_regression.D.Optimise.Checkpoint)

Module Optimise.Checkpoint

type state = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.D)).Checkpoint.state = {
  1. mutable current_batch : int;
  2. mutable batches_per_epoch : int;
  3. mutable epochs : float;
  4. mutable batches : int;
  5. mutable loss : Algodiff.t array;
  6. mutable start_at : float;
  7. mutable stop : bool;
  8. mutable gs : Algodiff.t array array;
  9. mutable ps : Algodiff.t array array;
  10. mutable us : Algodiff.t array array;
  11. mutable ch : Algodiff.t array array array;
}
type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.D)).Checkpoint.typ =
  1. | Batch of int
  2. | Epoch of float
  3. | Custom of (state -> unit)
  4. | None
val init_state : int -> float -> state
val default_checkpoint_fun : (string -> 'a) -> 'a
val print_state_info : state -> unit
val print_summary : state -> unit
val run : typ -> (string -> unit) -> int -> Algodiff.t -> state -> unit
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/Clipping/index.html b/owl/Owl_regression/D/Optimise/Clipping/index.html deleted file mode 100644 index e98d8b9e9..000000000 --- a/owl/Owl_regression/D/Optimise/Clipping/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Clipping (owl.Owl_regression.D.Optimise.Clipping)

Module Optimise.Clipping

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.D)).Clipping.typ =
  1. | L2norm of float
  2. | Value of float * float
  3. | None
val run : typ -> Algodiff.t -> Algodiff.t
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/Gradient/index.html b/owl/Owl_regression/D/Optimise/Gradient/index.html deleted file mode 100644 index 3448b9dd9..000000000 --- a/owl/Owl_regression/D/Optimise/Gradient/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -Gradient (owl.Owl_regression.D.Optimise.Gradient)

Module Optimise.Gradient

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.D)).Gradient.typ =
  1. | GD
  2. | CG
  3. | CD
  4. | NonlinearCG
  5. | DaiYuanCG
  6. | NewtonCG
  7. | Newton
val run : typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/Learning_Rate/index.html b/owl/Owl_regression/D/Optimise/Learning_Rate/index.html deleted file mode 100644 index 2baf2abbb..000000000 --- a/owl/Owl_regression/D/Optimise/Learning_Rate/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Learning_Rate (owl.Owl_regression.D.Optimise.Learning_Rate)

Module Optimise.Learning_Rate

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.D)).Learning_Rate.typ =
  1. | Adagrad of float
  2. | Const of float
  3. | Decay of float * float
  4. | Exp_decay of float * float
  5. | RMSprop of float * float
  6. | Adam of float * float * float
  7. | Schedule of float array
val run : typ -> int -> Algodiff.t -> Algodiff.t array -> Algodiff.t
val default : typ -> typ
val update_ch : typ -> Algodiff.t -> Algodiff.t array -> Algodiff.t array
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/Loss/index.html b/owl/Owl_regression/D/Optimise/Loss/index.html deleted file mode 100644 index 445d1fea6..000000000 --- a/owl/Owl_regression/D/Optimise/Loss/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Loss (owl.Owl_regression.D.Optimise.Loss)

Module Optimise.Loss

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.D)).Loss.typ =
  1. | Hinge
  2. | L1norm
  3. | L2norm
  4. | Quadratic
  5. | Cross_entropy
  6. | Custom of (Algodiff.t -> Algodiff.t -> Algodiff.t)
val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/Momentum/index.html b/owl/Owl_regression/D/Optimise/Momentum/index.html deleted file mode 100644 index a7b545fe0..000000000 --- a/owl/Owl_regression/D/Optimise/Momentum/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Momentum (owl.Owl_regression.D.Optimise.Momentum)

Module Optimise.Momentum

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.D)).Momentum.typ =
  1. | Standard of float
  2. | Nesterov of float
  3. | None
val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/Params/index.html b/owl/Owl_regression/D/Optimise/Params/index.html deleted file mode 100644 index 34a19c301..000000000 --- a/owl/Owl_regression/D/Optimise/Params/index.html +++ /dev/null @@ -1,16 +0,0 @@ - -Params (owl.Owl_regression.D.Optimise.Params)

Module Optimise.Params

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.D)).Params.typ = {
  1. mutable epochs : float;
  2. mutable batch : Batch.typ;
  3. mutable gradient : Gradient.typ;
  4. mutable loss : Loss.typ;
  5. mutable learning_rate : Learning_Rate.typ;
  6. mutable regularisation : Regularisation.typ;
  7. mutable momentum : Momentum.typ;
  8. mutable clipping : Clipping.typ;
  9. mutable stopping : Stopping.typ;
  10. mutable checkpoint : Checkpoint.typ;
  11. mutable verbosity : bool;
}
val default : unit -> typ
val config : ?batch:Batch.typ -> ?gradient:Gradient.typ -> ?loss:Loss.typ -> ?learning_rate:Learning_Rate.typ -> ?regularisation:Regularisation.typ -> ?momentum:Momentum.typ -> ?clipping:Clipping.typ -> ?stopping:Stopping.typ -> ?checkpoint:Checkpoint.typ -> ?verbosity:bool -> float -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/Regularisation/index.html b/owl/Owl_regression/D/Optimise/Regularisation/index.html deleted file mode 100644 index 0e509a6a9..000000000 --- a/owl/Owl_regression/D/Optimise/Regularisation/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Regularisation (owl.Owl_regression.D.Optimise.Regularisation)

Module Optimise.Regularisation

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.D)).Regularisation.typ =
  1. | L1norm of float
  2. | L2norm of float
  3. | Elastic_net of float * float
  4. | None
val run : typ -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/Stopping/index.html b/owl/Owl_regression/D/Optimise/Stopping/index.html deleted file mode 100644 index 383d091e1..000000000 --- a/owl/Owl_regression/D/Optimise/Stopping/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Stopping (owl.Owl_regression.D.Optimise.Stopping)

Module Optimise.Stopping

val run : typ -> float -> bool
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/Utils/index.html b/owl/Owl_regression/D/Optimise/Utils/index.html deleted file mode 100644 index b81d899ac..000000000 --- a/owl/Owl_regression/D/Optimise/Utils/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Utils (owl.Owl_regression.D.Optimise.Utils)

Module Optimise.Utils

val sample_num : Algodiff.t -> int
val draw_samples : Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t
val get_chunk : Algodiff.t -> Algodiff.t -> int -> int -> Algodiff.t * Algodiff.t
\ No newline at end of file diff --git a/owl/Owl_regression/D/Optimise/index.html b/owl/Owl_regression/D/Optimise/index.html deleted file mode 100644 index ee8456a13..000000000 --- a/owl/Owl_regression/D/Optimise/index.html +++ /dev/null @@ -1,31 +0,0 @@ - -Optimise (owl.Owl_regression.D.Optimise)

Module D.Optimise

module Algodiff : sig ... end
module Utils : sig ... end
module Learning_Rate : sig ... end
module Batch : sig ... end
module Loss : sig ... end
module Gradient : sig ... end
module Momentum : sig ... end
module Regularisation : sig ... end
module Clipping : sig ... end
module Stopping : sig ... end
module Checkpoint : sig ... end
module Params : sig ... end
val minimise_weight : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Checkpoint.state * Algodiff.t
val minimise_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t * Algodiff.t array array) -> (Algodiff.t -> Algodiff.t array array * Algodiff.t array array) -> (Algodiff.t array array -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state
val minimise_fun : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Checkpoint.state * Algodiff.t
val minimise_compiled_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> (unit -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state
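Params bundles the batch, gradient, loss, learning-rate, regularisation, momentum, clipping, stopping and checkpoint settings, and minimise_fun uses them to iterate: evaluate the objective, differentiate it with Algodiff, update the argument. A hedged sketch follows; the objective, the Adagrad rate of 1.0 and the 100 epochs are illustrative choices, not recommendations from this page.

(* Sketch: minimise a small objective over a 1x2 parameter matrix. *)
module O = Owl_regression.D.Optimise
module AD = O.Algodiff

(* Rosenbrock-like objective; get_item reads individual entries. *)
let f w =
  let open AD in
  let x = Maths.get_item w 0 0 in
  let y = Maths.get_item w 0 1 in
  Maths.(sqr (_f 1. - x) + _f 100. * sqr (y - sqr x))

let () =
  let params =
    O.Params.config
      ~batch:O.Batch.Full
      ~gradient:O.Gradient.GD
      ~loss:O.Loss.Quadratic
      ~learning_rate:(O.Learning_Rate.Adagrad 1.)
      ~verbosity:false
      100.                                   (* epochs *)
  in
  let _state, w = O.minimise_fun params f (AD.Mat.uniform 1 2) in
  AD.Mat.print (AD.primal' w)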
\ No newline at end of file diff --git a/owl/Owl_regression/D/index.html b/owl/Owl_regression/D/index.html deleted file mode 100644 index 39f4a5daa..000000000 --- a/owl/Owl_regression/D/index.html +++ /dev/null @@ -1,44 +0,0 @@ - -D (owl.Owl_regression.D)

Module Owl_regression.D

module Optimise : sig ... end
val ridge : ?i:bool -> ?alpha:float -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr array
val lasso : ?i:bool -> ?alpha:float -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr array
val elastic_net : ?i:bool -> ?alpha:float -> ?l1_ratio:float -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr array
val svm : ?i:bool -> ?a:float -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr array
val logistic : ?i:bool -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr array
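These wrappers fit standard linear models on top of the Optimise sub-module above; each returns an array of fitted parameter blocks, and ~i presumably toggles fitting an intercept (an assumption, not stated on this page). A synthetic-data sketch, which also assumes elt = float for this double-precision instance so that a float literal array can be used directly:

(* Sketch: recover known weights with ridge regression. *)
module R = Owl_regression.D
module M = R.Optimise.Algodiff.A           (* the underlying ndarray module *)

let () =
  let x = M.uniform [| 100; 3 |] in
  let w_true = M.of_array [| 1.; 2.; 3. |] [| 3; 1 |] in
  let y = M.dot x w_true in
  let w = R.ridge ~i:false ~alpha:0.001 x y in
  Array.iter (fun p -> M.print p) w        (* first block should be close to w_true *)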
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/A/Linalg/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/A/Linalg/index.html deleted file mode 100644 index 8d0d5ecbd..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl.Owl_regression.Make_Embedded.Optimise.Algodiff.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/A/Mat/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/A/Mat/index.html deleted file mode 100644 index b44306f37..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_regression.Make_Embedded.Optimise.Algodiff.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/A/Scalar/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/A/Scalar/index.html deleted file mode 100644 index a828e2750..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl.Owl_regression.Make_Embedded.Optimise.Algodiff.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/A/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/A/index.html deleted file mode 100644 index dce9eb9e8..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/A/index.html +++ /dev/null @@ -1,160 +0,0 @@ - -A (owl.Owl_regression.Make_Embedded.Optimise.Algodiff.A)

Module Algodiff.A

val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Arr/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Arr/index.html deleted file mode 100644 index 2372a0f2c..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Arr/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Arr (owl.Owl_regression.Make_Embedded.Optimise.Algodiff.Arr)

Module Algodiff.Arr

val empty : int array -> t
val zeros : int array -> t
val ones : int array -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int array -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int array -> t
val shape : t -> int array
val numel : t -> int
val reset : t -> unit
val reshape : t -> int array -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Builder/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Builder/index.html deleted file mode 100644 index d6ab790c3..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Builder/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Builder (owl.Owl_regression.Make_Embedded.Optimise.Algodiff.Builder)

Module Algodiff.Builder

module type Siso = sig ... end
val build_siso : (module Siso) -> t -> t
module type Sipo = sig ... end
val build_sipo : (module Sipo) -> t -> t * t
module type Sito = sig ... end
val build_sito : (module Sito) -> t -> t * t * t
module type Siao = sig ... end
val build_siao : (module Siao) -> t -> t array
module type Piso = sig ... end
val build_piso : (module Piso) -> t -> t -> t
module type Aiso = sig ... end
val build_aiso : (module Aiso) -> t array -> t
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Builder/module-type-Aiso/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Builder/module-type-Aiso/index.html deleted file mode 100644 index 5f171bb0f..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Builder/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl.Owl_regression.Make_Embedded.Optimise.Algodiff.Builder.Aiso)

Module type Builder.Aiso

val label : string
val ff : t array -> t
val df : int list -> t -> t array -> t array -> t
val dr : int list -> t array -> t -> t Stdlib.ref -> t list
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Builder/module-type-Piso/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Builder/module-type-Piso/index.html deleted file mode 100644 index 63a176a0c..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Builder/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl.Owl_regression.Make_Embedded.Optimise.Algodiff.Builder.Piso)

Module type Builder.Piso

val label : string
val ff_aa : A.elt -> A.elt -> t
val ff_ab : A.elt -> A.arr -> t
val ff_ba : A.arr -> A.elt -> t
val ff_bb : A.arr -> A.arr -> t
val df_da : t -> t -> t -> t -> t
val df_db : t -> t -> t -> t -> t
val df_dab : t -> t -> t -> t -> t -> t
val dr_ab : t -> t -> t -> t Stdlib.ref -> t * t
val dr_a : t -> t -> t -> t Stdlib.ref -> t
val dr_b : t -> t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Builder/module-type-Siao/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Builder/module-type-Siao/index.html deleted file mode 100644 index d7376966a..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Builder/module-type-Siao/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siao (owl.Owl_regression.Make_Embedded.Optimise.Algodiff.Builder.Siao)

Module type Builder.Siao

val label : string
val ff_f : A.elt -> t array
val ff_arr : A.arr -> t array
val df : t array -> t -> t -> t array
val dr : t -> t -> t Stdlib.ref array -> t Stdlib.ref array -> t
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Builder/module-type-Sipo/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Builder/module-type-Sipo/index.html deleted file mode 100644 index 3e8137807..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Builder/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl.Owl_regression.Make_Embedded.Optimise.Algodiff.Builder.Sipo)

Module type Builder.Sipo

val label : string
val ff_f : A.elt -> t * t
val ff_arr : A.arr -> t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Builder/module-type-Siso/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Builder/module-type-Siso/index.html deleted file mode 100644 index 0662bf1a2..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Builder/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl.Owl_regression.Make_Embedded.Optimise.Algodiff.Builder.Siso)

Module type Builder.Siso

val label : string
val ff_f : A.elt -> t
val ff_arr : A.arr -> t
val df : t -> t -> t -> t
val dr : t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Builder/module-type-Sito/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Builder/module-type-Sito/index.html deleted file mode 100644 index fb5a55b00..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Builder/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl.Owl_regression.Make_Embedded.Optimise.Algodiff.Builder.Sito)

Module type Builder.Sito

val label : string
val ff_f : A.elt -> t * t * t
val ff_arr : A.arr -> t * t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Linalg/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Linalg/index.html deleted file mode 100644 index 82c14a27c..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl.Owl_regression.Make_Embedded.Optimise.Algodiff.Linalg)

Module Algodiff.Linalg

val inv : t -> t
val logdet : t -> t
val chol : ?upper:bool -> t -> t
val qr : t -> t * t
val lq : t -> t * t
val svd : ?thin:bool -> t -> t * t * t
val sylvester : t -> t -> t -> t
val lyapunov : t -> t -> t
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> t -> t -> t
val (/@) : t -> t -> t
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> t -> t -> t
val care : ?diag_r:bool -> t -> t -> t -> t -> t
val dare : ?diag_r:bool -> t -> t -> t -> t -> t
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Mat/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Mat/index.html deleted file mode 100644 index 711d7f85a..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_regression.Make_Embedded.Optimise.Algodiff.Mat)

Module Algodiff.Mat

val empty : int -> int -> t
val zeros : int -> int -> t
val eye : int -> t
val ones : int -> int -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int -> int -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int -> int -> t
val shape : t -> int * int
val numel : t -> int
val row_num : t -> int
val col_num : t -> int
val reset : t -> unit
val reshape : int -> int -> t -> t
val get : t -> int -> int -> t
val set : t -> int -> int -> t -> t
val row : t -> int -> t
val mean : t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
val map_by_row : (t -> t) -> t -> t
val of_arrays : A.elt array array -> t
val init_2d : int -> int -> (int -> int -> t) -> t
val print : t -> unit
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Maths/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Maths/index.html deleted file mode 100644 index 6b7d01fbd..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/Maths/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Maths (owl.Owl_regression.Make_Embedded.Optimise.Algodiff.Maths)

Module Algodiff.Maths

val (+) : t -> t -> t
val (-) : t -> t -> t
val (*) : t -> t -> t
val (/) : t -> t -> t
val (*@) : t -> t -> t
val (**) : t -> t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val kron : t -> t -> t
val dot : t -> t -> t
val pow : t -> t -> t
val atan2 : t -> t -> t
val min2 : t -> t -> t
val max2 : t -> t -> t
val cross_entropy : t -> t -> t
val inv : t -> t
val neg : t -> t
val abs : t -> t
val signum : t -> t
val floor : t -> t
val ceil : t -> t
val round : t -> t
val sqr : t -> t
val sqrt : t -> t
val log : t -> t
val log2 : t -> t
val log10 : t -> t
val exp : t -> t
val sin : t -> t
val cos : t -> t
val tan : t -> t
val sinh : t -> t
val cosh : t -> t
val tanh : t -> t
val asin : t -> t
val acos : t -> t
val atan : t -> t
val asinh : t -> t
val acosh : t -> t
val atanh : t -> t
val sum' : t -> t
val log_sum_exp' : t -> t
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> t -> t
val sum : ?axis:int -> ?keep_dims:bool -> t -> t
val sum_reduce : ?axis:int array -> t -> t
val mean : t -> t
val transpose : ?axis:int array -> t -> t
val swap : int -> int -> t -> t
val l1norm' : t -> t
val l2norm' : t -> t
val l2norm_sqr' : t -> t
val sigmoid : t -> t
val relu : t -> t
val dawsn : t -> t
val softplus : t -> t
val softsign : t -> t
val softmax : ?axis:int -> t -> t
val reshape : t -> int array -> t
val flatten : t -> t
val get_item : t -> int -> int -> t
val get_row : t -> int -> t
val concat : axis:int -> t -> t -> t
val split : axis:int -> int array -> t -> t array
val of_arrays : t array array -> t
val to_arrays : t -> t array array
val concatenate : axis:int -> t array -> t
val stack : axis:int -> t array -> t
val get_slice : int list list -> t -> t
val set_slice : int list list -> t -> t -> t
val get_fancy : Owl_types.index list -> t -> t
val set_fancy : Owl_types.index list -> t -> t -> t
val diag : ?k:int -> t -> t
val diagm : ?k:int -> t -> t
val trace : t -> t
val triu : ?k:int -> t -> t
val tril : ?k:int -> t -> t
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/NN/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/NN/index.html deleted file mode 100644 index b5cbdfbd5..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/NN/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -NN (owl.Owl_regression.Make_Embedded.Optimise.Algodiff.NN)

Module Algodiff.NN

val dropout : ?rate:float -> t -> t
val conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val dilated_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val dilated_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val dilated_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val transpose_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val transpose_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val transpose_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val max_pool1d : Owl_types.padding -> t -> int array -> int array -> t
val max_pool2d : Owl_types.padding -> t -> int array -> int array -> t
val max_pool3d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool1d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool2d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool3d : Owl_types.padding -> t -> int array -> int array -> t
val upsampling2d : t -> int array -> t
val pad : ?v:A.elt -> int list list -> t -> t
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/index.html deleted file mode 100644 index bcc9e6632..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Algodiff/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Algodiff (owl.Owl_regression.Make_Embedded.Optimise.Algodiff)

Module Optimise.Algodiff

module A : sig ... end
type t = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A)).Algodiff.t =
  1. | F of A.elt
  2. | Arr of A.arr
  3. | DF of t * t * int
  4. | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
val tag : unit -> int
val primal : t -> t
val primal' : t -> t
val zero : t -> t
val reset_zero : t -> t
val tangent : t -> t
val adjref : t -> t Stdlib.ref
val adjval : t -> t
val shape : t -> int array
val is_float : t -> bool
val is_arr : t -> bool
val row_num : t -> int
val col_num : t -> int
val numel : t -> int
val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t
val clip_by_l2norm : A.elt -> t -> t
val copy_primal' : t -> t
val tile : t -> int array -> t
val repeat : t -> int array -> t
val pack_elt : A.elt -> t
val unpack_elt : t -> A.elt
val pack_flt : float -> t
val _f : float -> t
val unpack_flt : t -> float
val pack_arr : A.arr -> t
val unpack_arr : t -> A.arr
val deep_info : t -> string
val type_info : t -> string
val error_binop : string -> t -> t -> 'a
val error_uniop : string -> t -> 'a
val make_forward : t -> t -> int -> t
val make_reverse : t -> int -> t
val reverse_prop : t -> t -> unit
val diff : (t -> t) -> t -> t
val diff' : (t -> t) -> t -> t * t
val grad : (t -> t) -> t -> t
val grad' : (t -> t) -> t -> t * t
val jacobian : (t -> t) -> t -> t
val jacobian' : (t -> t) -> t -> t * t
val jacobianv : (t -> t) -> t -> t -> t
val jacobianv' : (t -> t) -> t -> t -> t * t
val jacobianTv : (t -> t) -> t -> t -> t
val jacobianTv' : (t -> t) -> t -> t -> t * t
val hessian : (t -> t) -> t -> t
val hessian' : (t -> t) -> t -> t * t
val hessianv : (t -> t) -> t -> t -> t
val hessianv' : (t -> t) -> t -> t -> t * t
val laplacian : (t -> t) -> t -> t
val laplacian' : (t -> t) -> t -> t * t
val gradhessian : (t -> t) -> t -> t * t
val gradhessian' : (t -> t) -> t -> t * t * t
val gradhessianv : (t -> t) -> t -> t -> t * t
val gradhessianv' : (t -> t) -> t -> t -> t * t * t
module Builder : sig ... end
module Maths : sig ... end
module Linalg : sig ... end
module NN : sig ... end
module Mat : sig ... end
module Arr : sig ... end
val to_trace : t list -> string
val to_dot : t list -> string
val pp_num : Stdlib.Format.formatter -> t -> unit
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Batch/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Batch/index.html deleted file mode 100644 index 935048478..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Batch/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Batch (owl.Owl_regression.Make_Embedded.Optimise.Batch)

Module Optimise.Batch

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A)).Batch.typ =
  1. | Full
  2. | Mini of int
  3. | Sample of int
  4. | Stochastic
val run : typ -> Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t
val batches : typ -> Algodiff.t -> int
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Checkpoint/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Checkpoint/index.html deleted file mode 100644 index 79e130d1b..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Checkpoint/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Checkpoint (owl.Owl_regression.Make_Embedded.Optimise.Checkpoint)

Module Optimise.Checkpoint

type state = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A)).Checkpoint.state = {
  1. mutable current_batch : int;
  2. mutable batches_per_epoch : int;
  3. mutable epochs : float;
  4. mutable batches : int;
  5. mutable loss : Algodiff.t array;
  6. mutable start_at : float;
  7. mutable stop : bool;
  8. mutable gs : Algodiff.t array array;
  9. mutable ps : Algodiff.t array array;
  10. mutable us : Algodiff.t array array;
  11. mutable ch : Algodiff.t array array array;
}
type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A)).Checkpoint.typ =
  1. | Batch of int
  2. | Epoch of float
  3. | Custom of state -> unit
  4. | None
val init_state : int -> float -> state
val default_checkpoint_fun : (string -> 'a) -> 'a
val print_state_info : state -> unit
val print_summary : state -> unit
val run : typ -> (string -> unit) -> int -> Algodiff.t -> state -> unit
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Clipping/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Clipping/index.html deleted file mode 100644 index 44d3c5552..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Clipping/index.html +++ /dev/null @@ -1,3 +0,0 @@ - -Clipping (owl.Owl_regression.Make_Embedded.Optimise.Clipping)

Module Optimise.Clipping

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A)).Clipping.typ =
  1. | L2norm of float
  2. | Value of float * float
  3. | None
val run : typ -> Algodiff.t -> Algodiff.t
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Gradient/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Gradient/index.html deleted file mode 100644 index 82ef30b89..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Gradient/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -Gradient (owl.Owl_regression.Make_Embedded.Optimise.Gradient)

Module Optimise.Gradient

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A)).Gradient.typ =
  1. | GD
  2. | CG
  3. | CD
  4. | NonlinearCG
  5. | DaiYuanCG
  6. | NewtonCG
  7. | Newton
val run : typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Learning_Rate/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Learning_Rate/index.html deleted file mode 100644 index dd1b4e08a..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Learning_Rate/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Learning_Rate (owl.Owl_regression.Make_Embedded.Optimise.Learning_Rate)

Module Optimise.Learning_Rate

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A)).Learning_Rate.typ =
  1. | Adagrad of float
  2. | Const of float
  3. | Decay of float * float
  4. | Exp_decay of float * float
  5. | RMSprop of float * float
  6. | Adam of float * float * float
  7. | Schedule of float array
val run : typ -> int -> Algodiff.t -> Algodiff.t array -> Algodiff.t
val default : typ -> typ
val update_ch : typ -> Algodiff.t -> Algodiff.t array -> Algodiff.t array
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Loss/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Loss/index.html deleted file mode 100644 index 5bdebc1ed..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Loss/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Loss (owl.Owl_regression.Make_Embedded.Optimise.Loss)

Module Optimise.Loss

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A)).Loss.typ =
  1. | Hinge
  2. | L1norm
  3. | L2norm
  4. | Quadratic
  5. | Cross_entropy
  6. | Custom of Algodiff.t -> Algodiff.t -> Algodiff.t
val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
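As a rough usage note: Loss.run pairs a loss type with two Algodiff values (target and prediction) and returns the loss as an Algodiff scalar. A minimal sketch, assuming the concrete instance Owl_regression.S.Optimise documented later in this diff; the random vectors are placeholders.

module O = Owl_regression.S.Optimise
module AD = O.Algodiff

let () =
  let target = AD.Mat.gaussian 4 1 in
  let prediction = AD.Mat.gaussian 4 1 in
  (* Quadratic is symmetric in its two arguments, so the ordering does not matter here *)
  let l = O.Loss.run O.Loss.Quadratic target prediction in
  Printf.printf "quadratic loss = %g\n" (AD.unpack_flt l)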
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Momentum/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Momentum/index.html deleted file mode 100644 index f6d09bfbb..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Momentum/index.html +++ /dev/null @@ -1,3 +0,0 @@ - -Momentum (owl.Owl_regression.Make_Embedded.Optimise.Momentum)

Module Optimise.Momentum

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A)).Momentum.typ =
  1. | Standard of float
  2. | Nesterov of float
  3. | None
val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Params/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Params/index.html deleted file mode 100644 index 5eb8e5d30..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Params/index.html +++ /dev/null @@ -1,14 +0,0 @@ - -Params (owl.Owl_regression.Make_Embedded.Optimise.Params)

Module Optimise.Params

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A)).Params.typ = {
  1. mutable epochs : float;
  2. mutable batch : Batch.typ;
  3. mutable gradient : Gradient.typ;
  4. mutable loss : Loss.typ;
  5. mutable learning_rate : Learning_Rate.typ;
  6. mutable regularisation : Regularisation.typ;
  7. mutable momentum : Momentum.typ;
  8. mutable clipping : Clipping.typ;
  9. mutable stopping : Stopping.typ;
  10. mutable checkpoint : Checkpoint.typ;
  11. mutable verbosity : bool;
}
val default : unit -> typ
val config : ?batch:Batch.typ -> ?gradient:Gradient.typ -> ?loss:Loss.typ -> ?learning_rate:Learning_Rate.typ -> ?regularisation:Regularisation.typ -> ?momentum:Momentum.typ -> ?clipping:Clipping.typ -> ?stopping:Stopping.typ -> ?checkpoint:Checkpoint.typ -> ?verbosity:bool -> float -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Regularisation/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Regularisation/index.html deleted file mode 100644 index 5263ecdde..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Regularisation/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Regularisation (owl.Owl_regression.Make_Embedded.Optimise.Regularisation)

Module Optimise.Regularisation

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A)).Regularisation.typ =
  1. | L1norm of float
  2. | L2norm of float
  3. | Elastic_net of float * float
  4. | None
val run : typ -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Stopping/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Stopping/index.html deleted file mode 100644 index d38319f97..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Stopping/index.html +++ /dev/null @@ -1,3 +0,0 @@ - -Stopping (owl.Owl_regression.Make_Embedded.Optimise.Stopping)

Module Optimise.Stopping

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(A)).Stopping.typ =
  1. | Const of float
  2. | Early of int * int
  3. | None
val run : typ -> float -> bool
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/Utils/index.html b/owl/Owl_regression/Make_Embedded/Optimise/Utils/index.html deleted file mode 100644 index 63fbfd4d3..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/Utils/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Utils (owl.Owl_regression.Make_Embedded.Optimise.Utils)

Module Optimise.Utils

val sample_num : Algodiff.t -> int
val draw_samples : Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t
val get_chunk : Algodiff.t -> Algodiff.t -> int -> int -> Algodiff.t * Algodiff.t
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/Optimise/index.html b/owl/Owl_regression/Make_Embedded/Optimise/index.html deleted file mode 100644 index f0239127c..000000000 --- a/owl/Owl_regression/Make_Embedded/Optimise/index.html +++ /dev/null @@ -1,31 +0,0 @@ - -Optimise (owl.Owl_regression.Make_Embedded.Optimise)

Module Make_Embedded.Optimise

module Algodiff : sig ... end
module Utils : sig ... end
module Learning_Rate : sig ... end
module Batch : sig ... end
module Loss : sig ... end
module Gradient : sig ... end
module Momentum : sig ... end
module Regularisation : sig ... end
module Clipping : sig ... end
module Stopping : sig ... end
module Checkpoint : sig ... end
module Params : sig ... end
val minimise_weight : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Checkpoint.state * Algodiff.t
val minimise_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t * Algodiff.t array array) -> (Algodiff.t -> Algodiff.t array array * Algodiff.t array array) -> (Algodiff.t array array -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state
val minimise_fun : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Checkpoint.state * Algodiff.t
val minimise_compiled_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> (unit -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state
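A rough sketch of how the minimise_* entry points are driven: build a Params value with Params.config, then hand it an objective. The code uses Owl_regression.S.Optimise (a concrete module with this exact signature, also removed in this diff) rather than the Make_Embedded instance; the objective, learning rate and epoch count are arbitrary illustrative choices.

module O = Owl_regression.S.Optimise
module AD = O.Algodiff

let () =
  (* minimise f(x) = (x - 3)^2 from x = 0 with plain gradient descent *)
  let f x = AD.Maths.(sqr (x - AD.pack_flt 3.)) in
  let params =
    O.Params.config
      ~gradient:O.Gradient.GD
      ~learning_rate:(O.Learning_Rate.Const 0.1)
      ~verbosity:false
      50.   (* epochs *)
  in
  let _state, x_min = O.minimise_fun params f (AD.pack_flt 0.) in
  Printf.printf "argmin ~= %g\n" (AD.unpack_flt x_min)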
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/argument-1-A/Linalg/index.html b/owl/Owl_regression/Make_Embedded/argument-1-A/Linalg/index.html deleted file mode 100644 index 884b66bf3..000000000 --- a/owl/Owl_regression/Make_Embedded/argument-1-A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl.Owl_regression.Make_Embedded.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
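These are the linear-algebra operations the functor argument A must provide. A small sketch with Owl.Mat and Owl.Linalg.D, one pair of Owl modules offering this interface for dense double-precision matrices; the 3x3 system is arbitrary.

let () =
  let open Owl in
  (* solve a x = b, then check the residual norm *)
  let a = Mat.add (Mat.eye 3) (Mat.uniform 3 3) in
  let b = Mat.uniform 3 1 in
  let x = Linalg.D.linsolve a b in
  let r = Mat.sub (Mat.dot a x) b in
  Printf.printf "residual l2 norm = %g\n" (Mat.l2norm' r)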
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/argument-1-A/Mat/index.html b/owl/Owl_regression/Make_Embedded/argument-1-A/Mat/index.html deleted file mode 100644 index 446b3adaa..000000000 --- a/owl/Owl_regression/Make_Embedded/argument-1-A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_regression.Make_Embedded.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/argument-1-A/Scalar/index.html b/owl/Owl_regression/Make_Embedded/argument-1-A/Scalar/index.html deleted file mode 100644 index fe2f5f8c7..000000000 --- a/owl/Owl_regression/Make_Embedded/argument-1-A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl.Owl_regression.Make_Embedded.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/argument-1-A/index.html b/owl/Owl_regression/Make_Embedded/argument-1-A/index.html deleted file mode 100644 index 3b238c227..000000000 --- a/owl/Owl_regression/Make_Embedded/argument-1-A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl.Owl_regression.Make_Embedded.A)

Parameter Make_Embedded.A

include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
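Parameter A is any ndarray module satisfying the signature above. A brief sketch using Owl.Dense.Ndarray.D, one Owl module that fulfils it; the shape and the mapped function are placeholders.

module N = Owl.Dense.Ndarray.D

let () =
  let x = N.uniform [| 3; 4 |] in
  let y = N.map (fun a -> (2. *. a) +. 1.) x in   (* element-wise transform *)
  let dims = Array.to_list (Array.map string_of_int (N.shape y)) in
  Printf.printf "shape = %s, sum = %g\n" (String.concat "x" dims) (N.sum' y)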
\ No newline at end of file diff --git a/owl/Owl_regression/Make_Embedded/index.html b/owl/Owl_regression/Make_Embedded/index.html deleted file mode 100644 index d1120be1f..000000000 --- a/owl/Owl_regression/Make_Embedded/index.html +++ /dev/null @@ -1,44 +0,0 @@ - -Make_Embedded (owl.Owl_regression.Make_Embedded)

Module Owl_regression.Make_Embedded

Parameters

Signature

include sig ... end
module Optimise : sig ... end
val ridge : ?i:bool -> ?alpha:float -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr array
val lasso : ?i:bool -> ?alpha:float -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr array
val elastic_net : ?i:bool -> ?alpha:float -> ?l1_ratio:float -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr array
val svm : ?i:bool -> ?a:float -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr array
val logistic : ?i:bool -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr array
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Algodiff/A/Linalg/index.html b/owl/Owl_regression/S/Optimise/Algodiff/A/Linalg/index.html deleted file mode 100644 index 908c54d69..000000000 --- a/owl/Owl_regression/S/Optimise/Algodiff/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl.Owl_regression.S.Optimise.Algodiff.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Algodiff/A/Mat/index.html b/owl/Owl_regression/S/Optimise/Algodiff/A/Mat/index.html deleted file mode 100644 index 093d8a939..000000000 --- a/owl/Owl_regression/S/Optimise/Algodiff/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_regression.S.Optimise.Algodiff.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Algodiff/A/Scalar/index.html b/owl/Owl_regression/S/Optimise/Algodiff/A/Scalar/index.html deleted file mode 100644 index a7ece5853..000000000 --- a/owl/Owl_regression/S/Optimise/Algodiff/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl.Owl_regression.S.Optimise.Algodiff.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Algodiff/A/index.html b/owl/Owl_regression/S/Optimise/Algodiff/A/index.html deleted file mode 100644 index 3044f4841..000000000 --- a/owl/Owl_regression/S/Optimise/Algodiff/A/index.html +++ /dev/null @@ -1,160 +0,0 @@ - -A (owl.Owl_regression.S.Optimise.Algodiff.A)

Module Algodiff.A

val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Algodiff/Arr/index.html b/owl/Owl_regression/S/Optimise/Algodiff/Arr/index.html deleted file mode 100644 index 68f3317a2..000000000 --- a/owl/Owl_regression/S/Optimise/Algodiff/Arr/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Arr (owl.Owl_regression.S.Optimise.Algodiff.Arr)

Module Algodiff.Arr

val empty : int array -> t
val zeros : int array -> t
val ones : int array -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int array -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int array -> t
val shape : t -> int array
val numel : t -> int
val reset : t -> unit
val reshape : t -> int array -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Algodiff/Builder/index.html b/owl/Owl_regression/S/Optimise/Algodiff/Builder/index.html deleted file mode 100644 index 1a54efd3d..000000000 --- a/owl/Owl_regression/S/Optimise/Algodiff/Builder/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Builder (owl.Owl_regression.S.Optimise.Algodiff.Builder)

Module Algodiff.Builder

module type Siso = sig ... end
val build_siso : (module Siso) -> t -> t
module type Sipo = sig ... end
val build_sipo : (module Sipo) -> t -> t * t
module type Sito = sig ... end
val build_sito : (module Sito) -> t -> t * t * t
module type Siao = sig ... end
val build_siao : (module Siao) -> t -> t array
module type Piso = sig ... end
val build_piso : (module Piso) -> t -> t -> t
module type Aiso = sig ... end
val build_aiso : (module Aiso) -> t array -> t
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Algodiff/Builder/module-type-Aiso/index.html b/owl/Owl_regression/S/Optimise/Algodiff/Builder/module-type-Aiso/index.html deleted file mode 100644 index 75a798039..000000000 --- a/owl/Owl_regression/S/Optimise/Algodiff/Builder/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl.Owl_regression.S.Optimise.Algodiff.Builder.Aiso)

Module type Builder.Aiso

val label : string
val ff : t array -> t
val df : int list -> t -> t array -> t array -> t
val dr : int list -> t array -> t -> t Stdlib.ref -> t list
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Algodiff/Builder/module-type-Piso/index.html b/owl/Owl_regression/S/Optimise/Algodiff/Builder/module-type-Piso/index.html deleted file mode 100644 index df71215d3..000000000 --- a/owl/Owl_regression/S/Optimise/Algodiff/Builder/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl.Owl_regression.S.Optimise.Algodiff.Builder.Piso)

Module type Builder.Piso

val label : string
val ff_aa : A.elt -> A.elt -> t
val ff_ab : A.elt -> A.arr -> t
val ff_ba : A.arr -> A.elt -> t
val ff_bb : A.arr -> A.arr -> t
val df_da : t -> t -> t -> t -> t
val df_db : t -> t -> t -> t -> t
val df_dab : t -> t -> t -> t -> t -> t
val dr_ab : t -> t -> t -> t Stdlib.ref -> t * t
val dr_a : t -> t -> t -> t Stdlib.ref -> t
val dr_b : t -> t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Algodiff/Builder/module-type-Siao/index.html b/owl/Owl_regression/S/Optimise/Algodiff/Builder/module-type-Siao/index.html deleted file mode 100644 index 993c85fc4..000000000 --- a/owl/Owl_regression/S/Optimise/Algodiff/Builder/module-type-Siao/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siao (owl.Owl_regression.S.Optimise.Algodiff.Builder.Siao)

Module type Builder.Siao

val label : string
val ff_f : A.elt -> t array
val ff_arr : A.arr -> t array
val df : t array -> t -> t -> t array
val dr : t -> t -> t Stdlib.ref array -> t Stdlib.ref array -> t
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Algodiff/Builder/module-type-Sipo/index.html b/owl/Owl_regression/S/Optimise/Algodiff/Builder/module-type-Sipo/index.html deleted file mode 100644 index 7182dc7eb..000000000 --- a/owl/Owl_regression/S/Optimise/Algodiff/Builder/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl.Owl_regression.S.Optimise.Algodiff.Builder.Sipo)

Module type Builder.Sipo

val label : string
val ff_f : A.elt -> t * t
val ff_arr : A.arr -> t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Algodiff/Builder/module-type-Siso/index.html b/owl/Owl_regression/S/Optimise/Algodiff/Builder/module-type-Siso/index.html deleted file mode 100644 index 113884789..000000000 --- a/owl/Owl_regression/S/Optimise/Algodiff/Builder/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl.Owl_regression.S.Optimise.Algodiff.Builder.Siso)

Module type Builder.Siso

val label : string
val ff_f : A.elt -> t
val ff_arr : A.arr -> t
val df : t -> t -> t -> t
val dr : t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Algodiff/Builder/module-type-Sito/index.html b/owl/Owl_regression/S/Optimise/Algodiff/Builder/module-type-Sito/index.html deleted file mode 100644 index 1b36eac7a..000000000 --- a/owl/Owl_regression/S/Optimise/Algodiff/Builder/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl.Owl_regression.S.Optimise.Algodiff.Builder.Sito)

Module type Builder.Sito

val label : string
val ff_f : A.elt -> t * t * t
val ff_arr : A.arr -> t * t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Algodiff/Linalg/index.html b/owl/Owl_regression/S/Optimise/Algodiff/Linalg/index.html deleted file mode 100644 index f8f9ca42b..000000000 --- a/owl/Owl_regression/S/Optimise/Algodiff/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl.Owl_regression.S.Optimise.Algodiff.Linalg)

Module Algodiff.Linalg

val inv : t -> t
val logdet : t -> t
val chol : ?upper:bool -> t -> t
val qr : t -> t * t
val lq : t -> t * t
val svd : ?thin:bool -> t -> t * t * t
val sylvester : t -> t -> t -> t
val lyapunov : t -> t -> t
val discrete_lyapunov : ?solver:[ `bilinear | `default | `direct ] -> t -> t -> t
val (/@) : t -> t -> t
val linsolve : ?trans:bool -> ?typ:[ `l | `n | `u ] -> t -> t -> t
val care : ?diag_r:bool -> t -> t -> t -> t -> t
val dare : ?diag_r:bool -> t -> t -> t -> t -> t
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Algodiff/Mat/index.html b/owl/Owl_regression/S/Optimise/Algodiff/Mat/index.html deleted file mode 100644 index 8c398a100..000000000 --- a/owl/Owl_regression/S/Optimise/Algodiff/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_regression.S.Optimise.Algodiff.Mat)

Module Algodiff.Mat

val empty : int -> int -> t
val zeros : int -> int -> t
val eye : int -> t
val ones : int -> int -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int -> int -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int -> int -> t
val shape : t -> int * int
val numel : t -> int
val row_num : t -> int
val col_num : t -> int
val reset : t -> unit
val reshape : int -> int -> t -> t
val get : t -> int -> int -> t
val set : t -> int -> int -> t -> t
val row : t -> int -> t
val mean : t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
val map_by_row : (t -> t) -> t -> t
val of_arrays : A.elt array array -> t
val init_2d : int -> int -> (int -> int -> t) -> t
val print : t -> unit
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Algodiff/Maths/index.html b/owl/Owl_regression/S/Optimise/Algodiff/Maths/index.html deleted file mode 100644 index f3d8097a3..000000000 --- a/owl/Owl_regression/S/Optimise/Algodiff/Maths/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Maths (owl.Owl_regression.S.Optimise.Algodiff.Maths)

Module Algodiff.Maths

val (+) : t -> t -> t
val (-) : t -> t -> t
val (*) : t -> t -> t
val (/) : t -> t -> t
val (*@) : t -> t -> t
val (**) : t -> t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val kron : t -> t -> t
val dot : t -> t -> t
val pow : t -> t -> t
val atan2 : t -> t -> t
val min2 : t -> t -> t
val max2 : t -> t -> t
val cross_entropy : t -> t -> t
val inv : t -> t
val neg : t -> t
val abs : t -> t
val signum : t -> t
val floor : t -> t
val ceil : t -> t
val round : t -> t
val sqr : t -> t
val sqrt : t -> t
val log : t -> t
val log2 : t -> t
val log10 : t -> t
val exp : t -> t
val sin : t -> t
val cos : t -> t
val tan : t -> t
val sinh : t -> t
val cosh : t -> t
val tanh : t -> t
val asin : t -> t
val acos : t -> t
val atan : t -> t
val asinh : t -> t
val acosh : t -> t
val atanh : t -> t
val sum' : t -> t
val log_sum_exp' : t -> t
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> t -> t
val sum : ?axis:int -> ?keep_dims:bool -> t -> t
val sum_reduce : ?axis:int array -> t -> t
val mean : t -> t
val transpose : ?axis:int array -> t -> t
val swap : int -> int -> t -> t
val l1norm' : t -> t
val l2norm' : t -> t
val l2norm_sqr' : t -> t
val sigmoid : t -> t
val relu : t -> t
val dawsn : t -> t
val softplus : t -> t
val softsign : t -> t
val softmax : ?axis:int -> t -> t
val reshape : t -> int array -> t
val flatten : t -> t
val get_item : t -> int -> int -> t
val get_row : t -> int -> t
val concat : axis:int -> t -> t -> t
val split : axis:int -> int array -> t -> t array
val of_arrays : t array array -> t
val to_arrays : t -> t array array
val concatenate : axis:int -> t array -> t
val stack : axis:int -> t array -> t
val get_slice : int list list -> t -> t
val set_slice : int list list -> t -> t -> t
val get_fancy : Owl_types.index list -> t -> t
val set_fancy : Owl_types.index list -> t -> t -> t
val diag : ?k:int -> t -> t
val diagm : ?k:int -> t -> t
val trace : t -> t
val triu : ?k:int -> t -> t
val tril : ?k:int -> t -> t
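The Maths operators above are the AD-aware building blocks; composing them and handing the result to grad yields gradients of whole expressions, e.g. a least-squares objective. A hedged sketch built directly from the functor application named in the type equalities on this page; the data and shapes are invented.

module AD = Owl_algodiff_generic.Make (Owl_algodiff_primal_ops.S)

let () =
  (* gradient of w |-> ||x *@ w - y||^2 evaluated at w = 0 *)
  let x = AD.Mat.gaussian 5 3 in
  let y = AD.Mat.gaussian 5 1 in
  let loss w = AD.Maths.(l2norm_sqr' ((x *@ w) - y)) in
  let g = AD.grad loss (AD.Mat.zeros 3 1) in
  AD.Mat.print g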
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Algodiff/NN/index.html b/owl/Owl_regression/S/Optimise/Algodiff/NN/index.html deleted file mode 100644 index 6f8785764..000000000 --- a/owl/Owl_regression/S/Optimise/Algodiff/NN/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -NN (owl.Owl_regression.S.Optimise.Algodiff.NN)

Module Algodiff.NN

val dropout : ?rate:float -> t -> t
val conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val dilated_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val dilated_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val dilated_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t
val transpose_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val transpose_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val transpose_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t
val max_pool1d : Owl_types.padding -> t -> int array -> int array -> t
val max_pool2d : Owl_types.padding -> t -> int array -> int array -> t
val max_pool3d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool1d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool2d : Owl_types.padding -> t -> int array -> int array -> t
val avg_pool3d : Owl_types.padding -> t -> int array -> int array -> t
val upsampling2d : t -> int array -> t
val pad : ?v:A.elt -> int list list -> t -> t
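The NN primitives operate on Algodiff values; in Owl the convolution inputs follow a batch x spatial x spatial x channel layout and kernels a spatial x spatial x in-channel x out-channel layout (stated here from Owl's ndarray conventions, not from this listing). A small sketch via Owl.Algodiff.S; all shapes are arbitrary.

module AD = Owl.Algodiff.S

let () =
  let x = AD.Arr.gaussian [| 1; 28; 28; 1 |] in   (* one 28x28 single-channel sample *)
  let k = AD.Arr.gaussian [| 3; 3; 1; 8 |] in     (* 3x3 kernel mapping 1 -> 8 channels *)
  let y = AD.NN.conv2d ~padding:Owl_types.SAME x k [| 1; 1 |] in
  AD.shape y |> Array.iter (Printf.printf "%d ");
  print_newline ()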
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Algodiff/index.html b/owl/Owl_regression/S/Optimise/Algodiff/index.html deleted file mode 100644 index 4527c490d..000000000 --- a/owl/Owl_regression/S/Optimise/Algodiff/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Algodiff (owl.Owl_regression.S.Optimise.Algodiff)

Module Optimise.Algodiff

module A : sig ... end
type t = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.S)).Algodiff.t =
  1. | F of A.elt
  2. | Arr of A.arr
  3. | DF of t * t * int
  4. | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
val tag : unit -> int
val primal : t -> t
val primal' : t -> t
val zero : t -> t
val reset_zero : t -> t
val tangent : t -> t
val adjref : t -> t Stdlib.ref
val adjval : t -> t
val shape : t -> int array
val is_float : t -> bool
val is_arr : t -> bool
val row_num : t -> int
val col_num : t -> int
val numel : t -> int
val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t
val clip_by_l2norm : A.elt -> t -> t
val copy_primal' : t -> t
val tile : t -> int array -> t
val repeat : t -> int array -> t
val pack_elt : A.elt -> t
val unpack_elt : t -> A.elt
val pack_flt : float -> t
val _f : float -> t
val unpack_flt : t -> float
val pack_arr : A.arr -> t
val unpack_arr : t -> A.arr
val deep_info : t -> string
val type_info : t -> string
val error_binop : string -> t -> t -> 'a
val error_uniop : string -> t -> 'a
val make_forward : t -> t -> int -> t
val make_reverse : t -> int -> t
val reverse_prop : t -> t -> unit
val diff : (t -> t) -> t -> t
val diff' : (t -> t) -> t -> t * t
val grad : (t -> t) -> t -> t
val grad' : (t -> t) -> t -> t * t
val jacobian : (t -> t) -> t -> t
val jacobian' : (t -> t) -> t -> t * t
val jacobianv : (t -> t) -> t -> t -> t
val jacobianv' : (t -> t) -> t -> t -> t * t
val jacobianTv : (t -> t) -> t -> t -> t
val jacobianTv' : (t -> t) -> t -> t -> t * t
val hessian : (t -> t) -> t -> t
val hessian' : (t -> t) -> t -> t * t
val hessianv : (t -> t) -> t -> t -> t
val hessianv' : (t -> t) -> t -> t -> t * t
val laplacian : (t -> t) -> t -> t
val laplacian' : (t -> t) -> t -> t * t
val gradhessian : (t -> t) -> t -> t * t
val gradhessian' : (t -> t) -> t -> t * t * t
val gradhessianv : (t -> t) -> t -> t -> t * t
val gradhessianv' : (t -> t) -> t -> t -> t * t * t
module Builder : sig ... end
module Maths : sig ... end
module Linalg : sig ... end
module NN : sig ... end
module Mat : sig ... end
module Arr : sig ... end
val to_trace : t list -> string
val to_dot : t list -> string
val pp_num : Stdlib.Format.formatter -> t -> unit
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Batch/index.html b/owl/Owl_regression/S/Optimise/Batch/index.html deleted file mode 100644 index 996168421..000000000 --- a/owl/Owl_regression/S/Optimise/Batch/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Batch (owl.Owl_regression.S.Optimise.Batch)

Module Optimise.Batch

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.S)).Batch.typ =
  1. | Full
  2. | Mini of int
  3. | Sample of int
  4. | Stochastic
val run : typ -> Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t
val batches : typ -> Algodiff.t -> int
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Checkpoint/index.html b/owl/Owl_regression/S/Optimise/Checkpoint/index.html deleted file mode 100644 index a9d1cae6f..000000000 --- a/owl/Owl_regression/S/Optimise/Checkpoint/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Checkpoint (owl.Owl_regression.S.Optimise.Checkpoint)

Module Optimise.Checkpoint

type state = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.S)).Checkpoint.state = {
  1. mutable current_batch : int;
  2. mutable batches_per_epoch : int;
  3. mutable epochs : float;
  4. mutable batches : int;
  5. mutable loss : Algodiff.t array;
  6. mutable start_at : float;
  7. mutable stop : bool;
  8. mutable gs : Algodiff.t array array;
  9. mutable ps : Algodiff.t array array;
  10. mutable us : Algodiff.t array array;
  11. mutable ch : Algodiff.t array array array;
}
type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.S)).Checkpoint.typ =
  1. | Batch of int
  2. | Epoch of float
  3. | Custom of state -> unit
  4. | None
val init_state : int -> float -> state
val default_checkpoint_fun : (string -> 'a) -> 'a
val print_state_info : state -> unit
val print_summary : state -> unit
val run : typ -> (string -> unit) -> int -> Algodiff.t -> state -> unit
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Clipping/index.html b/owl/Owl_regression/S/Optimise/Clipping/index.html deleted file mode 100644 index 32a3bf15b..000000000 --- a/owl/Owl_regression/S/Optimise/Clipping/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Clipping (owl.Owl_regression.S.Optimise.Clipping)

Module Optimise.Clipping

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.S)).Clipping.typ =
  1. | L2norm of float
  2. | Value of float * float
  3. | None
val run : typ -> Algodiff.t -> Algodiff.t
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Gradient/index.html b/owl/Owl_regression/S/Optimise/Gradient/index.html deleted file mode 100644 index e3a3aa1e7..000000000 --- a/owl/Owl_regression/S/Optimise/Gradient/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -Gradient (owl.Owl_regression.S.Optimise.Gradient)

Module Optimise.Gradient

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.S)).Gradient.typ =
  1. | GD
  2. | CG
  3. | CD
  4. | NonlinearCG
  5. | DaiYuanCG
  6. | NewtonCG
  7. | Newton
val run : typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Learning_Rate/index.html b/owl/Owl_regression/S/Optimise/Learning_Rate/index.html deleted file mode 100644 index 34b05f9c2..000000000 --- a/owl/Owl_regression/S/Optimise/Learning_Rate/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Learning_Rate (owl.Owl_regression.S.Optimise.Learning_Rate)

Module Optimise.Learning_Rate

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.S)).Learning_Rate.typ =
  1. | Adagrad of float
  2. | Const of float
  3. | Decay of float * float
  4. | Exp_decay of float * float
  5. | RMSprop of float * float
  6. | Adam of float * float * float
  7. | Schedule of float array
val run : typ -> int -> Algodiff.t -> Algodiff.t array -> Algodiff.t
val default : typ -> typ
val update_ch : typ -> Algodiff.t -> Algodiff.t array -> Algodiff.t array
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Loss/index.html b/owl/Owl_regression/S/Optimise/Loss/index.html deleted file mode 100644 index bead12612..000000000 --- a/owl/Owl_regression/S/Optimise/Loss/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Loss (owl.Owl_regression.S.Optimise.Loss)

Module Optimise.Loss

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.S)).Loss.typ =
  1. | Hinge
  2. | L1norm
  3. | L2norm
  4. | Quadratic
  5. | Cross_entropy
  6. | Custom of Algodiff.t -> Algodiff.t -> Algodiff.t
val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Momentum/index.html b/owl/Owl_regression/S/Optimise/Momentum/index.html deleted file mode 100644 index a43a5d27f..000000000 --- a/owl/Owl_regression/S/Optimise/Momentum/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Momentum (owl.Owl_regression.S.Optimise.Momentum)

Module Optimise.Momentum

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.S)).Momentum.typ =
  1. | Standard of float
  2. | Nesterov of float
  3. | None
val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Params/index.html b/owl/Owl_regression/S/Optimise/Params/index.html deleted file mode 100644 index 6957ca079..000000000 --- a/owl/Owl_regression/S/Optimise/Params/index.html +++ /dev/null @@ -1,16 +0,0 @@ - -Params (owl.Owl_regression.S.Optimise.Params)

Module Optimise.Params

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.S)).Params.typ = {
  1. mutable epochs : float;
  2. mutable batch : Batch.typ;
  3. mutable gradient : Gradient.typ;
  4. mutable loss : Loss.typ;
  5. mutable learning_rate : Learning_Rate.typ;
  6. mutable regularisation : Regularisation.typ;
  7. mutable momentum : Momentum.typ;
  8. mutable clipping : Clipping.typ;
  9. mutable stopping : Stopping.typ;
  10. mutable checkpoint : Checkpoint.typ;
  11. mutable verbosity : bool;
}
val default : unit -> typ
val config : ?batch:Batch.typ -> ?gradient:Gradient.typ -> ?loss:Loss.typ -> ?learning_rate:Learning_Rate.typ -> ?regularisation:Regularisation.typ -> ?momentum:Momentum.typ -> ?clipping:Clipping.typ -> ?stopping:Stopping.typ -> ?checkpoint:Checkpoint.typ -> ?verbosity:bool -> float -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Regularisation/index.html b/owl/Owl_regression/S/Optimise/Regularisation/index.html deleted file mode 100644 index d34734447..000000000 --- a/owl/Owl_regression/S/Optimise/Regularisation/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Regularisation (owl.Owl_regression.S.Optimise.Regularisation)

Module Optimise.Regularisation

type typ = Owl_optimise_generic.Make(Owl_algodiff_generic.Make(Owl_algodiff_primal_ops.S)).Regularisation.typ =
  1. | L1norm of float
  2. | L2norm of float
  3. | Elastic_net of float * float
  4. | None
val run : typ -> Algodiff.t -> Algodiff.t
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Stopping/index.html b/owl/Owl_regression/S/Optimise/Stopping/index.html deleted file mode 100644 index 85575dd5f..000000000 --- a/owl/Owl_regression/S/Optimise/Stopping/index.html +++ /dev/null @@ -1,4 +0,0 @@ - -Stopping (owl.Owl_regression.S.Optimise.Stopping)

Module Optimise.Stopping

val run : typ -> float -> bool
val default : typ -> typ
val to_string : typ -> string
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/Utils/index.html b/owl/Owl_regression/S/Optimise/Utils/index.html deleted file mode 100644 index ad8d58a00..000000000 --- a/owl/Owl_regression/S/Optimise/Utils/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Utils (owl.Owl_regression.S.Optimise.Utils)

Module Optimise.Utils

val sample_num : Algodiff.t -> int
val draw_samples : Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t
val get_chunk : Algodiff.t -> Algodiff.t -> int -> int -> Algodiff.t * Algodiff.t
\ No newline at end of file diff --git a/owl/Owl_regression/S/Optimise/index.html b/owl/Owl_regression/S/Optimise/index.html deleted file mode 100644 index 68dbfe431..000000000 --- a/owl/Owl_regression/S/Optimise/index.html +++ /dev/null @@ -1,31 +0,0 @@ - -Optimise (owl.Owl_regression.S.Optimise)

Module S.Optimise

module Algodiff : sig ... end
module Utils : sig ... end
module Learning_Rate : sig ... end
module Batch : sig ... end
module Loss : sig ... end
module Gradient : sig ... end
module Momentum : sig ... end
module Regularisation : sig ... end
module Clipping : sig ... end
module Stopping : sig ... end
module Checkpoint : sig ... end
module Params : sig ... end
val minimise_weight : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Checkpoint.state * Algodiff.t
val minimise_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t * Algodiff.t array array) -> (Algodiff.t -> Algodiff.t array array * Algodiff.t array array) -> (Algodiff.t array array -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state
val minimise_fun : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Checkpoint.state * Algodiff.t
val minimise_compiled_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> (unit -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state
\ No newline at end of file diff --git a/owl/Owl_regression/S/index.html b/owl/Owl_regression/S/index.html deleted file mode 100644 index 052526ecb..000000000 --- a/owl/Owl_regression/S/index.html +++ /dev/null @@ -1,44 +0,0 @@ - -S (owl.Owl_regression.S)

Module Owl_regression.S

module Optimise : sig ... end
val ridge : ?i:bool -> ?alpha:float -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr array
val lasso : ?i:bool -> ?alpha:float -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr array
val elastic_net : ?i:bool -> ?alpha:float -> ?l1_ratio:float -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr array
val svm : ?i:bool -> ?a:float -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr array
val logistic : ?i:bool -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr array
\ No newline at end of file diff --git a/owl/Owl_regression_generic/.dummy b/owl/Owl_regression_generic/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/A/Linalg/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/A/Linalg/index.html deleted file mode 100644 index b9fa61052..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl.Owl_regression_generic.Make.Optimise.Algodiff.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/A/Mat/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/A/Mat/index.html deleted file mode 100644 index 1f2458cf2..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_regression_generic.Make.Optimise.Algodiff.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/A/Scalar/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/A/Scalar/index.html deleted file mode 100644 index f70e897a9..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl.Owl_regression_generic.Make.Optimise.Algodiff.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/A/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/A/index.html deleted file mode 100644 index c7edfb82e..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl.Owl_regression_generic.Make.Optimise.Algodiff.A)

Module Algodiff.A

include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val dilated_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val dilated_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> int array -> arr
val transpose_conv1d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv2d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val transpose_conv3d : ?padding:Owl_types_common.padding -> arr -> arr -> int array -> arr
val max_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val max_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool1d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool2d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val avg_pool3d : ?padding:Owl_types_common.padding -> arr -> int array -> int array -> arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv1d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv2d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_input : arr -> arr -> int array -> int array -> arr -> arr
val dilated_conv3d_backward_kernel : arr -> arr -> int array -> int array -> arr -> arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val max_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool1d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool2d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val avg_pool3d_backward : Owl_types_common.padding -> arr -> int array -> int array -> arr -> arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Arr/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Arr/index.html deleted file mode 100644 index ee861dd07..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Arr/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Arr (owl.Owl_regression_generic.Make.Optimise.Algodiff.Arr)

Module Algodiff.Arr

val empty : int array -> t
val zeros : int array -> t
val ones : int array -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int array -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int array -> t
val shape : t -> int array
val numel : t -> int
val reset : t -> unit
val reshape : t -> int array -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Builder/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Builder/index.html deleted file mode 100644 index ee55d191d..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Builder/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Builder (owl.Owl_regression_generic.Make.Optimise.Algodiff.Builder)

Module Algodiff.Builder

Ops Builder
module type Siso = sig ... end
val build_siso : (module Siso) -> t -> t

build single input single output operations

module type Sipo = sig ... end
val build_sipo : (module Sipo) -> t -> t * t

build single input pair outputs operations

module type Sito = sig ... end
val build_sito : (module Sito) -> t -> t * t * t

build single input triple outputs operations

module type Siao = sig ... end
val build_siao : (module Siao) -> t -> t array

build single input array output operations

module type Piso = sig ... end
val build_piso : (module Piso) -> t -> t -> t

build pair inputs single output operations

module type Aiso = sig ... end
val build_aiso : (module Aiso) -> t array -> t

build array input single output operations

\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Builder/module-type-Aiso/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Builder/module-type-Aiso/index.html deleted file mode 100644 index 66230f748..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Builder/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl.Owl_regression_generic.Make.Optimise.Algodiff.Builder.Aiso)

Module type Builder.Aiso

val label : string
val ff : t array -> t
val df : int list -> t -> t array -> t array -> t
val dr : int list -> t array -> t -> t Stdlib.ref -> t list
\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Builder/module-type-Piso/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Builder/module-type-Piso/index.html deleted file mode 100644 index 9396c5f44..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Builder/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl.Owl_regression_generic.Make.Optimise.Algodiff.Builder.Piso)

Module type Builder.Piso

val label : string
val ff_aa : A.elt -> A.elt -> t
val ff_ab : A.elt -> A.arr -> t
val ff_ba : A.arr -> A.elt -> t
val ff_bb : A.arr -> A.arr -> t
val df_da : t -> t -> t -> t -> t
val df_db : t -> t -> t -> t -> t
val df_dab : t -> t -> t -> t -> t -> t
val dr_ab : t -> t -> t -> t Stdlib.ref -> t * t
val dr_a : t -> t -> t -> t Stdlib.ref -> t
val dr_b : t -> t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Builder/module-type-Siao/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Builder/module-type-Siao/index.html deleted file mode 100644 index 2aab12248..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Builder/module-type-Siao/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siao (owl.Owl_regression_generic.Make.Optimise.Algodiff.Builder.Siao)

Module type Builder.Siao

val label : string
val ff_f : A.elt -> t array
val ff_arr : A.arr -> t array
val df : t array -> t -> t -> t array
val dr : t -> t -> t Stdlib.ref array -> t Stdlib.ref array -> t
\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Builder/module-type-Sipo/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Builder/module-type-Sipo/index.html deleted file mode 100644 index 2f6be97ba..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Builder/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl.Owl_regression_generic.Make.Optimise.Algodiff.Builder.Sipo)

Module type Builder.Sipo

val label : string
val ff_f : A.elt -> t * t
val ff_arr : A.arr -> t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Builder/module-type-Siso/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Builder/module-type-Siso/index.html deleted file mode 100644 index acbeb5392..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Builder/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl.Owl_regression_generic.Make.Optimise.Algodiff.Builder.Siso)

Module type Builder.Siso

val label : string
val ff_f : A.elt -> t
val ff_arr : A.arr -> t
val df : t -> t -> t -> t
val dr : t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Builder/module-type-Sito/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Builder/module-type-Sito/index.html deleted file mode 100644 index b7b79d75c..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Builder/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl.Owl_regression_generic.Make.Optimise.Algodiff.Builder.Sito)

Module type Builder.Sito

val label : string
val ff_f : A.elt -> t * t * t
val ff_arr : A.arr -> t * t * t
val df : t -> t -> t -> t
val dr : t -> t -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> t
\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Linalg/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Linalg/index.html deleted file mode 100644 index 1f344d85f..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl.Owl_regression_generic.Make.Optimise.Algodiff.Linalg)

Module Algodiff.Linalg

val inv : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val logdet : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val chol : ?upper:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val qr : t -> t * t

Refer to :doc:`owl_dense_ndarray_generic`

val lq : t -> t * t

Refer to :doc:`owl_dense_ndarray_generic`

val svd : ?thin:bool -> t -> t * t * t

Refer to :doc:`owl_dense_ndarray_generic`

val sylvester : t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val lyapunov : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (/@) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val care : ?diag_r:bool -> t -> t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dare : ?diag_r:bool -> t -> t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Mat/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Mat/index.html deleted file mode 100644 index 5830e6a5d..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_regression_generic.Make.Optimise.Algodiff.Mat)

Module Algodiff.Mat

val empty : int -> int -> t
val zeros : int -> int -> t
val eye : int -> t
val ones : int -> int -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int -> int -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int -> int -> t
val shape : t -> int * int
val numel : t -> int
val row_num : t -> int
val col_num : t -> int
val reset : t -> unit
val reshape : int -> int -> t -> t
val get : t -> int -> int -> t
val set : t -> int -> int -> t -> t
val row : t -> int -> t
val mean : t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
val map_by_row : (t -> t) -> t -> t
val of_arrays : A.elt array array -> t
val init_2d : int -> int -> (int -> int -> t) -> t
val print : t -> unit
\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Maths/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Maths/index.html deleted file mode 100644 index 7c806e5a0..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/Maths/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Maths (owl.Owl_regression_generic.Make.Optimise.Algodiff.Maths)

Module Algodiff.Maths

val (+) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (-) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (*) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (/) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (*@) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (**) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val add : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sub : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val mul : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val div : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val kron : t -> t -> t

Refer to :doc:`owl_dense_matrix_generic`

val dot : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val pow : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atan2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val min2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cross_entropy : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val inv : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val neg : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val abs : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val signum : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val floor : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val ceil : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val round : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sqr : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sqrt : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log2 : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log10 : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val exp : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sin : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cos : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tan : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sinh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cosh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tanh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val asin : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val acos : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atan : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val asinh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val acosh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atanh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp : ?axis:int -> ?keep_dims:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum : ?axis:int -> ?keep_dims:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum_reduce : ?axis:int array -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val mean : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose : ?axis:int array -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val swap : int -> int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l1norm' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm_sqr' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sigmoid : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val relu : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dawsn : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softplus : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softsign : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softmax : ?axis:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val reshape : t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val flatten : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_item : t -> int -> int -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_row : t -> int -> t

Refer to :doc:`owl_dense_ndarray_generic`

val concat : axis:int -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val split : axis:int -> int array -> t -> t array

Refer to :doc:`owl_dense_ndarray_generic`

val of_arrays : t array array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val to_arrays : t -> t array array

Refer to :doc:`owl_dense_ndarray_generic`

val concatenate : axis:int -> t array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val stack : axis:int -> t array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_slice : int list list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val set_slice : int list list -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_fancy : Owl_types.index list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val set_fancy : Owl_types.index list -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val diag : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val diagm : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val trace : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val triu : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tril : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/NN/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/NN/index.html deleted file mode 100644 index b3b2eab41..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/NN/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -NN (owl.Owl_regression_generic.Make.Optimise.Algodiff.NN)

Module Algodiff.NN

val dropout : ?rate:float -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool1d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool2d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool3d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool1d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool2d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool3d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val upsampling2d : t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val pad : ?v:A.elt -> int list list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/index.html deleted file mode 100644 index 54ede71a7..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Algodiff/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -Algodiff (owl.Owl_regression_generic.Make.Optimise.Algodiff)

Module Optimise.Algodiff

include Owl_algodiff_core_sig.Sig
Type definition
include Owl_algodiff_types_sig.Sig with type elt := A.elt and type arr := A.arr
type t =
  1. | F of A.elt
  2. | Arr of A.arr
  3. | DF of t * t * int
  4. | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
Core functions
val tag : unit -> int

TODO

val primal : t -> t

TODO

val primal' : t -> t

TODO

val zero : t -> t

TODO

val reset_zero : t -> t

TODO

val tangent : t -> t

TODO

val adjref : t -> t Stdlib.ref

TODO

val adjval : t -> t

TODO

val shape : t -> int array

TODO

val is_float : t -> bool

TODO

val is_arr : t -> bool

TODO

val row_num : t -> int

number of rows

val col_num : t -> int

number of columns

val numel : t -> int

number of elements

val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t

other functions, without tracking gradient

val clip_by_l2norm : A.elt -> t -> t

other functions, without tracking gradient

val copy_primal' : t -> t

TODO

val tile : t -> int array -> t

TODO

val repeat : t -> int array -> t

TODO

val pack_elt : A.elt -> t

convert from elt type to t type.

val unpack_elt : t -> A.elt

convert from t type to elt type.

val pack_flt : float -> t

convert from float type to t type.

val _f : float -> t

A shortcut function for F A.(float_to_elt x).

val unpack_flt : t -> float

convert from t type to float type.

val pack_arr : A.arr -> t

convert from arr type to t type.

val unpack_arr : t -> A.arr

convert from t type to arr type.
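
A minimal sketch of moving values across these boundaries, assuming the concrete instance spelled out elsewhere in these pages (Owl_algodiff_generic.Make applied to Owl_algodiff_primal_ops.S):

module AD = Owl_algodiff_generic.Make (Owl_algodiff_primal_ops.S)

let () =
  let a = AD._f 2.5 in                                    (* same as F (A.float_to_elt 2.5) *)
  let b = AD.pack_arr (Owl_algodiff_primal_ops.S.ones [| 2; 2 |]) in
  let c = AD.Maths.(a * b) in                             (* scalar times a 2 x 2 array *)
  Printf.printf "sum = %g\n" (AD.unpack_flt AD.Maths.(sum' c))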

val deep_info : t -> string

TODO

val type_info : t -> string

TODO

val error_binop : string -> t -> t -> 'a

TODO

val error_uniop : string -> t -> 'a

TODO

val make_forward : t -> t -> int -> t

TODO

val make_reverse : t -> int -> t

TODO

val reverse_prop : t -> t -> unit

TODO

val diff : (t -> t) -> t -> t

diff f x returns the exact derivative of a function f : scalar -> scalar at point x. Simply calling diff f will return its derivative function g of the same type, i.e. g : scalar -> scalar.

Applying this function repeatedly gives higher-order derivatives of f, i.e. f |> diff |> diff |> diff |> ...

val diff' : (t -> t) -> t -> t * t

similar to diff, but return (f x, diff f x).
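
As a minimal sketch, assuming the same concrete instance as in the earlier sketch:

module AD = Owl_algodiff_generic.Make (Owl_algodiff_primal_ops.S)

let () =
  let f x = AD.Maths.(sin x + (x * x)) in
  let dy = AD.diff f (AD.pack_flt 1.0) in                 (* cos 1 + 2 *)
  let y, dy' = AD.diff' f (AD.pack_flt 1.0) in            (* (f 1, f' 1) in one call *)
  Printf.printf "f 1 = %g, f' 1 = %g, %g\n"
    (AD.unpack_flt y) (AD.unpack_flt dy) (AD.unpack_flt dy')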

val grad : (t -> t) -> t -> t

gradient of f : (vector -> scalar) at x, reverse ad.

val grad' : (t -> t) -> t -> t * t

similar to grad, but return (f x, grad f x).
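
For example (same AD alias as above), the gradient of the sum of squares at a vector of ones is the vector 2x:

module AD = Owl_algodiff_generic.Make (Owl_algodiff_primal_ops.S)

let () =
  let f x = AD.Maths.(sum' (x * x)) in                    (* vector -> scalar *)
  let x = AD.Arr.ones [| 3 |] in
  let g = AD.grad f x in                                  (* gradient is 2x = [2; 2; 2] *)
  Owl_algodiff_primal_ops.S.print (AD.unpack_arr g)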

val jacobian : (t -> t) -> t -> t

jacobian of f : (vector -> vector) at x, both x and y are row vectors.

val jacobian' : (t -> t) -> t -> t * t

similar to jacobian, but return (f x, jacobian f x)
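
A sketch (same AD alias as above): for the element-wise square, the Jacobian at a row vector x is the diagonal matrix with entries 2 x_i:

module AD = Owl_algodiff_generic.Make (Owl_algodiff_primal_ops.S)

let () =
  let f x = AD.Maths.(x * x) in                           (* row vector -> row vector *)
  let x = AD.Mat.uniform 1 3 in
  let j = AD.jacobian f x in                              (* 3 x 3, diagonal entries 2 x_i *)
  AD.Mat.print j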

val jacobianv : (t -> t) -> t -> t -> t

jacobian vector product of f : (vector -> vector) at x along v, forward ad. Namely, it calculates (jacobian f x) v.

val jacobianv' : (t -> t) -> t -> t -> t * t

similar to jacobianv, but returns (f x, jacobianv f x v)

val jacobianTv : (t -> t) -> t -> t -> t

transposed jacobian vector product of f : (vector -> vector) at x along v, backward ad. Namely, it calculates transpose (jacobianv f x v).

val jacobianTv' : (t -> t) -> t -> t -> t * t

similar to jacobianTv, but return (f x, transpose (jacobianv f x v))

val hessian : (t -> t) -> t -> t

hessian of f : (vector -> scalar) at x.

val hessian' : (t -> t) -> t -> t * t

similar to hessian, but returns (f x, hessian f x)
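
A sketch (same AD alias as above): the Hessian of the sum of cubes at a row vector x is the diagonal matrix with entries 6 x_i:

module AD = Owl_algodiff_generic.Make (Owl_algodiff_primal_ops.S)

let () =
  let f x = AD.Maths.(sum' (x * x * x)) in                (* vector -> scalar *)
  let x = AD.Mat.uniform 1 3 in
  AD.Mat.print (AD.hessian f x)                           (* 3 x 3, diagonal entries 6 x_i *)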

val hessianv : (t -> t) -> t -> t -> t

hessian vector product of f : (vector -> scalar) at x along v. Namely, it calculates (hessian f x) v.

val hessianv' : (t -> t) -> t -> t -> t * t

similar to hessianv, but return (f x, hessianv f x v).

val laplacian : (t -> t) -> t -> t

laplacian of f : (vector -> scalar) at x.

val laplacian' : (t -> t) -> t -> t * t

similar to laplacian, but return (f x, laplacian f x).

val gradhessian : (t -> t) -> t -> t * t

return (grad f x, hessian f x), f : (vector -> scalar)

val gradhessian' : (t -> t) -> t -> t * t * t

return (f x, grad f x, hessian f x)

val gradhessianv : (t -> t) -> t -> t -> t * t

return (grad f x v, hessian f x v)

val gradhessianv' : (t -> t) -> t -> t -> t * t * t

return (f x, grad f x v, hessian f x v)

include Owl_algodiff_ops_sig.Sig with type t := t and type elt := A.elt and type arr := A.arr and type op := op
module Builder : Owl_algodiff_ops_builder_sig.Sig with type t := t and type elt := A.elt and type arr := A.arr and type op := op
Supported Maths functions
module Maths : sig ... end
Supported Linalg functions
module Linalg : sig ... end
Supported Neural Network functions
module NN : sig ... end
Supported Mat functions
module Mat : sig ... end
Supported Arr functions
module Arr : sig ... end
Helper functions
include Owl_algodiff_graph_convert_sig.Sig with type t := t
val to_trace : t list -> string

to_trace [t0; t1; ...] outputs the trace of the computation graph on the terminal in a human-readable format.

val to_dot : t list -> string

to_dot [t0; t1; ...] outputs the trace of the computation graph in the dot file format, which you can feed into other tools for further visualisation, such as Graphviz.
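
A sketch of dumping a small reverse-mode graph (same AD alias as above); the resulting string can be piped to Graphviz's dot:

module AD = Owl_algodiff_generic.Make (Owl_algodiff_primal_ops.S)

let () =
  let x = AD.make_reverse (AD.pack_flt 2.) (AD.tag ()) in
  let y = AD.Maths.((x * x) + sin x) in
  print_endline (AD.to_dot [ y ])                         (* graph of y = x^2 + sin x *)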

val pp_num : Stdlib.Format.formatter -> t -> unit

pp_num t pretty prints the abstract number used in Algodiff.

\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Batch/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Batch/index.html deleted file mode 100644 index e35a5ecef..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Batch/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Batch (owl.Owl_regression_generic.Make.Optimise.Batch)

Module Optimise.Batch

Batch module

type typ =
  1. | Full
  2. | Mini of int
  3. | Sample of int
  4. | Stochastic

Types of batches.

val run : typ -> Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t

Execute the computations defined in module typ.

val batches : typ -> Algodiff.t -> int

Return the total number of batches given a batch typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Checkpoint/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Checkpoint/index.html deleted file mode 100644 index e4b64d9d0..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Checkpoint/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Checkpoint (owl.Owl_regression_generic.Make.Optimise.Checkpoint)

Module Optimise.Checkpoint

Checkpoint module

type state = {
  1. mutable current_batch : int;
  2. mutable batches_per_epoch : int;
  3. mutable epochs : float;
  4. mutable batches : int;
  5. mutable loss : Algodiff.t array;
  6. mutable start_at : float;
  7. mutable stop : bool;
  8. mutable gs : Algodiff.t array array;
  9. mutable ps : Algodiff.t array array;
  10. mutable us : Algodiff.t array array;
  11. mutable ch : Algodiff.t array array array;
}

Type definition of checkpoint

type typ =
  1. | Batch of int
  2. | Epoch of float
  3. | Custom of (state -> unit)
  4. | None

Batch type.

val init_state : int -> float -> state

init_state batches_per_epoch epochs initialises a state by specifying the number of batches per epoch and the number of epochs in total.
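
For example, a state for 100 batches per epoch over 50 epochs can be created and inspected as below (a sketch, assuming the Owl_optimise_generic.Make instance named in these pages, aliased as O):

module AD = Owl_algodiff_generic.Make (Owl_algodiff_primal_ops.S)
module O  = Owl_optimise_generic.Make (AD)

let () =
  let state = O.Checkpoint.init_state 100 50. in          (* 100 batches/epoch, 50. epochs *)
  O.Checkpoint.print_summary state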

val default_checkpoint_fun : (string -> 'a) -> 'a

This function is used for saving intermediate files during optimisation.

val print_state_info : state -> unit

Print out the detailed information of the current state.

val print_summary : state -> unit

Print out the summary of the current state.

val run : typ -> (string -> unit) -> int -> Algodiff.t -> state -> unit

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Clipping/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Clipping/index.html deleted file mode 100644 index 28589931a..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Clipping/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Clipping (owl.Owl_regression_generic.Make.Optimise.Clipping)

Module Optimise.Clipping

Clipping module

type typ =
  1. | L2norm of float
  2. | Value of float * float
  3. | None

Types of clipping functions.

val run : typ -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Gradient/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Gradient/index.html deleted file mode 100644 index 2f07ec616..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Gradient/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -Gradient (owl.Owl_regression_generic.Make.Optimise.Gradient)

Module Optimise.Gradient

Gradient module

type typ =
  1. | GD
  2. | CG
  3. | CD
  4. | NonlinearCG
  5. | DaiYuanCG
  6. | NewtonCG
  7. | Newton

Types of gradient function.

val run : typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Learning_Rate/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Learning_Rate/index.html deleted file mode 100644 index bb95638b1..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Learning_Rate/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Learning_Rate (owl.Owl_regression_generic.Make.Optimise.Learning_Rate)

Module Optimise.Learning_Rate

Strategies for learning rate update

type typ =
  1. | Adagrad of float
  2. | Const of float
  3. | Decay of float * float
  4. | Exp_decay of float * float
  5. | RMSprop of float * float
  6. | Adam of float * float * float
  7. | Schedule of float array

Representation of learning rate update strategies. Possible values include:

  • Adam (alpha, beta1, beta2), see ref for parameter meaning
val run : typ -> int -> Algodiff.t -> Algodiff.t array -> Algodiff.t

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val update_ch : typ -> Algodiff.t -> Algodiff.t array -> Algodiff.t array

Update the cache of gradients.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Loss/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Loss/index.html deleted file mode 100644 index a17e2ea68..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Loss/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Loss (owl.Owl_regression_generic.Make.Optimise.Loss)

Module Optimise.Loss

Loss module

type typ =
  1. | Hinge
  2. | L1norm
  3. | L2norm
  4. | Quadratic
  5. | Cross_entropy
  6. | Custom of (Algodiff.t -> Algodiff.t -> Algodiff.t)

Types of loss functions.
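
For instance, a custom absolute-error loss can be supplied through the Custom constructor (a sketch, using the same AD / O aliases as in the earlier sketches):

module AD = Owl_algodiff_generic.Make (Owl_algodiff_primal_ops.S)
module O  = Owl_optimise_generic.Make (AD)

(* sum of |y - y'|, built from Algodiff ops so gradients flow through it *)
let abs_err y y' = AD.Maths.(l1norm' (y - y'))
let custom_loss = O.Loss.Custom abs_err
let () = print_endline (O.Loss.to_string custom_loss)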

val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Momentum/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Momentum/index.html deleted file mode 100644 index a23b5b2cf..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Momentum/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Momentum (owl.Owl_regression_generic.Make.Optimise.Momentum)

Module Optimise.Momentum

Momentum module

type typ =
  1. | Standard of float
  2. | Nesterov of float
  3. | None

Types of momentum functions.

val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Params/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Params/index.html deleted file mode 100644 index 2145eab06..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Params/index.html +++ /dev/null @@ -1,14 +0,0 @@ - -Params (owl.Owl_regression_generic.Make.Optimise.Params)

Module Optimise.Params

Params module

type typ = {
  1. mutable epochs : float;
  2. mutable batch : Batch.typ;
  3. mutable gradient : Gradient.typ;
  4. mutable loss : Loss.typ;
  5. mutable learning_rate : Learning_Rate.typ;
  6. mutable regularisation : Regularisation.typ;
  7. mutable momentum : Momentum.typ;
  8. mutable clipping : Clipping.typ;
  9. mutable stopping : Stopping.typ;
  10. mutable checkpoint : Checkpoint.typ;
  11. mutable verbosity : bool;
}

Type definition of parameter.

val default : unit -> typ

Create module typ with default values.

val config : ?batch:Batch.typ -> ?gradient:Gradient.typ -> ?loss:Loss.typ -> ?learning_rate:Learning_Rate.typ -> ?regularisation:Regularisation.typ -> ?momentum:Momentum.typ -> ?clipping:Clipping.typ -> ?stopping:Stopping.typ -> ?checkpoint:Checkpoint.typ -> ?verbosity:bool -> float -> typ

This function creates a parameter object with many configurations.
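
A sketch of a typical configuration (same O alias as in the earlier sketches): 50 epochs of mini-batch training with Adam and a quadratic loss; the trailing float is the number of epochs.

module AD = Owl_algodiff_generic.Make (Owl_algodiff_primal_ops.S)
module O  = Owl_optimise_generic.Make (AD)

let params =
  O.Params.config
    ~batch:(O.Batch.Mini 16)
    ~learning_rate:(O.Learning_Rate.Adam (0.001, 0.9, 0.999))
    ~loss:O.Loss.Quadratic
    ~stopping:(O.Stopping.Const 1e-6)
    ~verbosity:true
    50.

let () = print_endline (O.Params.to_string params)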

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Regularisation/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Regularisation/index.html deleted file mode 100644 index e590d299d..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Regularisation/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Regularisation (owl.Owl_regression_generic.Make.Optimise.Regularisation)

Module Optimise.Regularisation

Regularisation module

type typ =
  1. | L1norm of float
  2. | L2norm of float
  3. | Elastic_net of float * float
  4. | None

Types of regularisation functions.

val run : typ -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Stopping/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Stopping/index.html deleted file mode 100644 index e8d847616..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Stopping/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Stopping (owl.Owl_regression_generic.Make.Optimise.Stopping)

Module Optimise.Stopping

Stopping module

type typ =
  1. | Const of float
  2. | Early of int * int
  3. | None

Types of stopping functions.

val run : typ -> float -> bool

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/Utils/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/Utils/index.html deleted file mode 100644 index f15de8245..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/Utils/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Utils (owl.Owl_regression_generic.Make.Optimise.Utils)

Module Optimise.Utils

Utils module

val sample_num : Algodiff.t -> int

Return the total number of samples in the passed-in ndarray.

val draw_samples : Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t

draw_samples x y draws samples from both x (observations) and y (labels). The samples are drawn along axis 0, so x and y must agree along axis 0.

val get_chunk : Algodiff.t -> Algodiff.t -> int -> int -> Algodiff.t * Algodiff.t

get_chunk x y i c gets a contiguous chunk of c samples starting at position i from x (observations) and y (labels).
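
A sketch contrasting the two (same AD / O aliases as before): draw_samples picks rows at random, get_chunk takes a contiguous block.

module AD = Owl_algodiff_generic.Make (Owl_algodiff_primal_ops.S)
module O  = Owl_optimise_generic.Make (AD)

let () =
  let x = AD.Arr.uniform [| 200; 3 |] in                  (* 200 observations *)
  let y = AD.Arr.uniform [| 200; 1 |] in                  (* 200 labels *)
  let xs, _ys = O.Utils.draw_samples x y 32 in            (* 32 random rows of x and y *)
  let xc, _yc = O.Utils.get_chunk x y 100 32 in           (* rows 100 .. 131 *)
  Printf.printf "%d %d\n" (O.Utils.sample_num xs) (O.Utils.sample_num xc)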

\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/argument-1-Optimise/index.html b/owl/Owl_regression_generic/Make/argument-1-Optimise/index.html deleted file mode 100644 index 49f7b00d0..000000000 --- a/owl/Owl_regression_generic/Make/argument-1-Optimise/index.html +++ /dev/null @@ -1,31 +0,0 @@ - -Optimise (owl.Owl_regression_generic.Make.Optimise)

Parameter Make.Optimise

module Utils : sig ... end

Utils module

module Learning_Rate : sig ... end

Strategies for learning rate update

module Batch : sig ... end

Batch module

module Loss : sig ... end

Loss module

module Gradient : sig ... end

Gradient module

module Momentum : sig ... end

Momentum module

module Regularisation : sig ... end

Regularisation module

module Clipping : sig ... end

Clipping module

module Stopping : sig ... end

Stopping module

module Checkpoint : sig ... end

Checkpoint module

module Params : sig ... end

Params module

Core functions
val minimise_weight : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> Algodiff.t -> Algodiff.t -> Algodiff.t -> Checkpoint.state * Algodiff.t

This function minimises the weight w of the passed-in function f.

  • f is a function f : w -> x -> y.
  • w is a row vector, but y can have any shape.
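
A minimal sketch (same AD / O aliases as in the earlier sketches): fit a 1 x 3 row vector w so that x *@ (reshape w) approximates y under a quadratic loss.

module AD = Owl_algodiff_generic.Make (Owl_algodiff_primal_ops.S)
module O  = Owl_optimise_generic.Make (AD)

let () =
  let x = AD.Mat.uniform 100 3 in
  let y = AD.Maths.(x *@ AD.Mat.uniform 3 1) in           (* synthetic targets *)
  let f w x = AD.Maths.(x *@ reshape w [| 3; 1 |]) in     (* w arrives as a row vector *)
  let w0 = AD.Mat.uniform 1 3 in
  let params = O.Params.config ~loss:O.Loss.Quadratic ~verbosity:false 100. in
  let _state, w = O.minimise_weight params f w0 x y in
  AD.Mat.print w                                          (* optimised 1 x 3 weights *)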

val minimise_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t * Algodiff.t array array) -> (Algodiff.t -> Algodiff.t array array * Algodiff.t array array) -> (Algodiff.t array array -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state

This function is specifically designed for minimising the weights in a neural network of graph structure. In Owl's earlier versions, the functions in the regression module were actually implemented using this function.

val minimise_fun : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t) -> Algodiff.t -> Checkpoint.state * Algodiff.t

This function minimises f : x -> y w.r.t x.

x is an ndarray, and y is a scalar value.
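
A minimal sketch (same AD / O aliases): minimise g x = ||x - 3||^2 over a length-5 ndarray, starting from zeros.

module AD = Owl_algodiff_generic.Make (Owl_algodiff_primal_ops.S)
module O  = Owl_optimise_generic.Make (AD)

let () =
  let g x = AD.Maths.(l2norm_sqr' (x - AD._f 3.)) in      (* scalar-valued objective *)
  let x0 = AD.Arr.zeros [| 5 |] in
  let params = O.Params.config ~learning_rate:(O.Learning_Rate.Const 0.1) ~verbosity:false 100. in
  let _state, x_min = O.minimise_fun params g x0 in
  Owl_algodiff_primal_ops.S.print (AD.unpack_arr x_min)   (* each entry approaches 3 *)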

val minimise_compiled_network : ?state:Checkpoint.state -> Params.typ -> (Algodiff.t -> Algodiff.t -> Algodiff.t) -> (unit -> unit) -> (string -> unit) -> Algodiff.t -> Algodiff.t -> Checkpoint.state

TODO

\ No newline at end of file diff --git a/owl/Owl_regression_generic/Make/index.html b/owl/Owl_regression_generic/Make/index.html deleted file mode 100644 index 63bf8fffc..000000000 --- a/owl/Owl_regression_generic/Make/index.html +++ /dev/null @@ -1,44 +0,0 @@ - -Make (owl.Owl_regression_generic.Make)

Module Owl_regression_generic.Make

Parameters

Signature

module Optimise = Optimise
val ridge : ?i:bool -> ?alpha:float -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr array
val lasso : ?i:bool -> ?alpha:float -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr array
val elastic_net : ?i:bool -> ?alpha:float -> ?l1_ratio:float -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr array
val svm : ?i:bool -> ?a:float -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr array
val logistic : ?i:bool -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr -> Optimise.Algodiff.A.arr array
\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/.dummy b/owl/Owl_regression_generic_sig/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/A/Linalg/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/A/Linalg/index.html deleted file mode 100644 index 98183b35b..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/A/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl.Owl_regression_generic_sig.Sig.Optimise.Algodiff.A.Linalg)

Module A.Linalg

val inv : arr -> arr
val logdet : arr -> elt
val chol : ?upper:bool -> arr -> arr
val svd : ?thin:bool -> arr -> arr * arr * arr
val qr : arr -> arr * arr
val lq : arr -> arr * arr
val sylvester : arr -> arr -> arr -> arr
val lyapunov : arr -> arr -> arr
val discrete_lyapunov : ?solver:[ `default | `bilinear | `direct ] -> arr -> arr -> arr
val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> arr -> arr -> arr
val care : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
val dare : ?diag_r:bool -> arr -> arr -> arr -> arr -> arr
\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/A/Mat/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/A/Mat/index.html deleted file mode 100644 index 32555c128..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/A/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_regression_generic_sig.Sig.Optimise.Algodiff.A.Mat)

Module A.Mat

val diagm : ?k:int -> arr -> arr
val triu : ?k:int -> arr -> arr
val tril : ?k:int -> arr -> arr
val eye : int -> arr
\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/A/Scalar/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/A/Scalar/index.html deleted file mode 100644 index 090dd08f6..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/A/Scalar/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Scalar (owl.Owl_regression_generic_sig.Sig.Optimise.Algodiff.A.Scalar)

Module A.Scalar

val add : elt -> elt -> elt
val sub : elt -> elt -> elt
val mul : elt -> elt -> elt
val div : elt -> elt -> elt
val pow : elt -> elt -> elt
val atan2 : elt -> elt -> elt
val abs : elt -> elt
val neg : elt -> elt
val sqr : elt -> elt
val sqrt : elt -> elt
val exp : elt -> elt
val log : elt -> elt
val log2 : elt -> elt
val log10 : elt -> elt
val signum : elt -> elt
val floor : elt -> elt
val ceil : elt -> elt
val round : elt -> elt
val sin : elt -> elt
val cos : elt -> elt
val tan : elt -> elt
val sinh : elt -> elt
val cosh : elt -> elt
val tanh : elt -> elt
val asin : elt -> elt
val acos : elt -> elt
val atan : elt -> elt
val asinh : elt -> elt
val acosh : elt -> elt
val atanh : elt -> elt
val relu : elt -> elt
val dawsn : elt -> elt
val sigmoid : elt -> elt
\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/A/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/A/index.html deleted file mode 100644 index f89571685..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/A/index.html +++ /dev/null @@ -1,158 +0,0 @@ - -A (owl.Owl_regression_generic_sig.Sig.Optimise.Algodiff.A)

Module Algodiff.A

include Owl_types_ndarray_eltcmp.Sig
include Owl_types_ndarray_basic.Sig
type arr
type elt
val empty : int array -> arr
val zeros : int array -> arr
val ones : int array -> arr
val create : int array -> elt -> arr
val sequential : ?a:elt -> ?step:elt -> int array -> arr
val uniform : ?a:elt -> ?b:elt -> int array -> arr
val gaussian : ?mu:elt -> ?sigma:elt -> int array -> arr
val bernoulli : ?p:elt -> int array -> arr
val init : int array -> (int -> elt) -> arr
val init_nd : int array -> (int array -> elt) -> arr
val shape : arr -> int array
val numel : arr -> int
val get : arr -> int array -> elt
val set : arr -> int array -> elt -> unit
val get_slice : int list list -> arr -> arr
val set_slice : int list list -> arr -> arr -> unit
val get_fancy : Owl_types_common.index list -> arr -> arr
val set_fancy : Owl_types_common.index list -> arr -> arr -> unit
val copy : arr -> arr
val copy_ : out:arr -> arr -> unit
val reset : arr -> unit
val reshape : arr -> int array -> arr
val reverse : arr -> arr
val tile : arr -> int array -> arr
val repeat : arr -> int array -> arr
val concatenate : ?axis:int -> arr array -> arr
val stack : ?axis:int -> arr array -> arr
val split : ?axis:int -> int array -> arr -> arr array
val expand : ?hi:bool -> arr -> int -> arr
val squeeze : ?axis:int array -> arr -> arr
val draw : ?axis:int -> arr -> int -> arr * int array
val map : (elt -> elt) -> arr -> arr
val fold : ?axis:int -> (elt -> elt -> elt) -> elt -> arr -> arr
val scan : ?axis:int -> (elt -> elt -> elt) -> arr -> arr
val one_hot : int -> arr -> arr
val pad : ?v:elt -> int list list -> arr -> arr
val print : ?max_row:int -> ?max_col:int -> ?header:bool -> ?fmt:(elt -> string) -> arr -> unit
val abs : arr -> arr
val neg : arr -> arr
val floor : arr -> arr
val ceil : arr -> arr
val round : arr -> arr
val sqr : arr -> arr
val sqrt : arr -> arr
val log : arr -> arr
val log2 : arr -> arr
val log10 : arr -> arr
val exp : arr -> arr
val sin : arr -> arr
val cos : arr -> arr
val tan : arr -> arr
val sinh : arr -> arr
val cosh : arr -> arr
val tanh : arr -> arr
val asin : arr -> arr
val acos : arr -> arr
val atan : arr -> arr
val asinh : arr -> arr
val acosh : arr -> arr
val atanh : arr -> arr
val min : ?axis:int -> ?keep_dims:bool -> arr -> arr
val max : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum : ?axis:int -> ?keep_dims:bool -> arr -> arr
val sum_reduce : ?axis:int array -> arr -> arr
val signum : arr -> arr
val sigmoid : arr -> arr
val relu : arr -> arr
val dawsn : arr -> arr
val min' : arr -> elt
val max' : arr -> elt
val sum' : arr -> elt
val log_sum_exp' : arr -> elt
val log_sum_exp : ?axis:int -> ?keep_dims:bool -> arr -> arr
val l1norm' : arr -> elt
val l2norm' : arr -> elt
val l2norm_sqr' : arr -> elt
val clip_by_value : ?amin:elt -> ?amax:elt -> arr -> arr
val clip_by_l2norm : elt -> arr -> arr
val pow : arr -> arr -> arr
val scalar_pow : elt -> arr -> arr
val pow_scalar : arr -> elt -> arr
val atan2 : arr -> arr -> arr
val scalar_atan2 : elt -> arr -> arr
val atan2_scalar : arr -> elt -> arr
val add : arr -> arr -> arr
val sub : arr -> arr -> arr
val mul : arr -> arr -> arr
val div : arr -> arr -> arr
val add_scalar : arr -> elt -> arr
val sub_scalar : arr -> elt -> arr
val mul_scalar : arr -> elt -> arr
val div_scalar : arr -> elt -> arr
val scalar_add : elt -> arr -> arr
val scalar_sub : elt -> arr -> arr
val scalar_mul : elt -> arr -> arr
val scalar_div : elt -> arr -> arr
val fma : arr -> arr -> arr -> arr
val conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val dilated_conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val dilated_conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val dilated_conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - int array -> - arr
val transpose_conv1d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val transpose_conv2d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val transpose_conv3d : - ?padding:Owl_types_common.padding -> - arr -> - arr -> - int array -> - arr
val max_pool1d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val max_pool2d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val max_pool3d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool1d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool2d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val avg_pool3d : - ?padding:Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr
val upsampling2d : arr -> int array -> arr
val conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val dilated_conv1d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv1d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv2d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv2d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv3d_backward_input : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val dilated_conv3d_backward_kernel : - arr -> - arr -> - int array -> - int array -> - arr -> - arr
val transpose_conv1d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv1d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv2d_backward_kernel : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_input : arr -> arr -> int array -> arr -> arr
val transpose_conv3d_backward_kernel : arr -> arr -> int array -> arr -> arr
val max_pool1d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val max_pool2d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val max_pool3d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool1d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool2d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val avg_pool3d_backward : - Owl_types_common.padding -> - arr -> - int array -> - int array -> - arr -> - arr
val upsampling2d_backward : arr -> int array -> arr -> arr
val row_num : arr -> int
val col_num : arr -> int
val row : arr -> int -> arr
val rows : arr -> int array -> arr
val copy_row_to : arr -> arr -> int -> unit
val copy_col_to : arr -> arr -> int -> unit
val dot : arr -> arr -> arr
val diag : ?k:int -> arr -> arr
val trace : arr -> elt
val transpose : ?axis:int array -> arr -> arr
val to_rows : arr -> arr array
val of_rows : arr array -> arr
val to_cols : arr -> arr array
val of_cols : arr array -> arr
val of_array : elt array -> int array -> arr
val of_arrays : elt array array -> arr
val float_to_elt : float -> elt
val elt_to_float : elt -> float
val elt_equal : arr -> arr -> arr
val elt_not_equal : arr -> arr -> arr
val elt_less : arr -> arr -> arr
val elt_greater : arr -> arr -> arr
val elt_less_equal : arr -> arr -> arr
val elt_greater_equal : arr -> arr -> arr
val elt_equal_scalar : arr -> elt -> arr
val elt_not_equal_scalar : arr -> elt -> arr
val elt_less_scalar : arr -> elt -> arr
val elt_greater_scalar : arr -> elt -> arr
val elt_less_equal_scalar : arr -> elt -> arr
val elt_greater_equal_scalar : arr -> elt -> arr
module Scalar : sig ... end
module Mat : sig ... end
module Linalg : sig ... end
\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Arr/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Arr/index.html deleted file mode 100644 index 7c7bdfd2c..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Arr/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Arr (owl.Owl_regression_generic_sig.Sig.Optimise.Algodiff.Arr)

Module Algodiff.Arr

val empty : int array -> t
val zeros : int array -> t
val ones : int array -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int array -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int array -> t
val shape : t -> int array
val numel : t -> int
val reset : t -> unit
val reshape : t -> int array -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Builder/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Builder/index.html deleted file mode 100644 index a0ca53241..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Builder/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Builder (owl.Owl_regression_generic_sig.Sig.Optimise.Algodiff.Builder)

Module Algodiff.Builder

Ops Builder
module type Siso = sig ... end
val build_siso : (module Siso) -> t -> t

build single input single output operations

module type Sipo = sig ... end
val build_sipo : (module Sipo) -> t -> t * t

build single input pair outputs operations

module type Sito = sig ... end
val build_sito : (module Sito) -> t -> t * t * t

build single input triple outputs operations

module type Siao = sig ... end
val build_siao : (module Siao) -> t -> t array

build single input array output operations

module type Piso = sig ... end
val build_piso : (module Piso) -> t -> t -> t

build pair inputs single output operations

module type Aiso = sig ... end
val build_aiso : (module Aiso) -> t array -> t

build array input single output operations

\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Aiso/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Aiso/index.html deleted file mode 100644 index fe6c2163e..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Aiso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Aiso (owl.Owl_regression_generic_sig.Sig.Optimise.Algodiff.Builder.Aiso)

Module type Builder.Aiso

val label : string
val ff : t array -> t
val df : int list -> t -> t array -> t array -> t
val dr : int list -> t array -> t -> t Stdlib.ref -> t list
\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Piso/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Piso/index.html deleted file mode 100644 index 17d5422a3..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Piso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Piso (owl.Owl_regression_generic_sig.Sig.Optimise.Algodiff.Builder.Piso)

Module type Builder.Piso

val label : string
val ff_aa : A.elt -> A.elt -> t
val ff_ab : A.elt -> A.arr -> t
val ff_ba : A.arr -> A.elt -> t
val ff_bb : A.arr -> A.arr -> t
val df_da : t -> t -> t -> t -> t
val df_db : t -> t -> t -> t -> t
val df_dab : t -> t -> t -> t -> t -> t
val dr_ab : t -> t -> t -> t Stdlib.ref -> t * t
val dr_a : t -> t -> t -> t Stdlib.ref -> t
val dr_b : t -> t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Siao/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Siao/index.html deleted file mode 100644 index 34900d97c..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Siao/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siao (owl.Owl_regression_generic_sig.Sig.Optimise.Algodiff.Builder.Siao)

Module type Builder.Siao

val label : string
val ff_f : A.elt -> t array
val ff_arr : A.arr -> t array
val df : t array -> t -> t -> t array
val dr : t -> t -> t Stdlib.ref array -> t Stdlib.ref array -> t
\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Sipo/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Sipo/index.html deleted file mode 100644 index d764754d0..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Sipo/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sipo (owl.Owl_regression_generic_sig.Sig.Optimise.Algodiff.Builder.Sipo)

Module type Builder.Sipo

val label : string
val ff_f : A.elt -> t * t
val ff_arr : A.arr -> t * t
val df : t -> t -> t -> t
val dr : - t -> - t -> - (t Stdlib.ref * t Stdlib.ref) -> - (t Stdlib.ref * t Stdlib.ref) -> - t
\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Siso/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Siso/index.html deleted file mode 100644 index 7b70f9fd3..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Siso/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Siso (owl.Owl_regression_generic_sig.Sig.Optimise.Algodiff.Builder.Siso)

Module type Builder.Siso

val label : string
val ff_f : A.elt -> t
val ff_arr : A.arr -> t
val df : t -> t -> t -> t
val dr : t -> t -> t Stdlib.ref -> t
\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Sito/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Sito/index.html deleted file mode 100644 index f21904d57..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Builder/module-type-Sito/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Sito (owl.Owl_regression_generic_sig.Sig.Optimise.Algodiff.Builder.Sito)

Module type Builder.Sito

val label : string
val ff_f : A.elt -> t * t * t
val ff_arr : A.arr -> t * t * t
val df : t -> t -> t -> t
val dr : - t -> - t -> - (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> - (t Stdlib.ref * t Stdlib.ref * t Stdlib.ref) -> - t
\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Linalg/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Linalg/index.html deleted file mode 100644 index 97029f6dd..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Linalg/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Linalg (owl.Owl_regression_generic_sig.Sig.Optimise.Algodiff.Linalg)

Module Algodiff.Linalg

val inv : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val logdet : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val chol : ?upper:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val qr : t -> t * t

Refer to :doc:`owl_dense_ndarray_generic`

val lq : t -> t * t

Refer to :doc:`owl_dense_ndarray_generic`

val svd : ?thin:bool -> t -> t * t * t

Refer to :doc:`owl_dense_ndarray_generic`

val sylvester : t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val lyapunov : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val discrete_lyapunov : - ?solver:[ `default | `bilinear | `direct ] -> - t -> - t -> - t

Refer to :doc:`owl_dense_ndarray_generic`

val (/@) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val linsolve : ?trans:bool -> ?typ:[ `n | `u | `l ] -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val care : ?diag_r:bool -> t -> t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dare : ?diag_r:bool -> t -> t -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Mat/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Mat/index.html deleted file mode 100644 index aad4b4748..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Mat/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Mat (owl.Owl_regression_generic_sig.Sig.Optimise.Algodiff.Mat)

Module Algodiff.Mat

val empty : int -> int -> t
val zeros : int -> int -> t
val eye : int -> t
val ones : int -> int -> t
val uniform : ?a:A.elt -> ?b:A.elt -> int -> int -> t
val gaussian : ?mu:A.elt -> ?sigma:A.elt -> int -> int -> t
val shape : t -> int * int
val numel : t -> int
val row_num : t -> int
val col_num : t -> int
val reset : t -> unit
val reshape : int -> int -> t -> t
val get : t -> int -> int -> t
val set : t -> int -> int -> t -> t
val row : t -> int -> t
val mean : t -> t
val add : t -> t -> t
val sub : t -> t -> t
val mul : t -> t -> t
val div : t -> t -> t
val dot : t -> t -> t
val map_by_row : (t -> t) -> t -> t
val of_arrays : A.elt array array -> t
val init_2d : int -> int -> (int -> int -> t) -> t
val print : t -> unit
\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Maths/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Maths/index.html deleted file mode 100644 index 9fb2d951e..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/Maths/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Maths (owl.Owl_regression_generic_sig.Sig.Optimise.Algodiff.Maths)

Module Algodiff.Maths

val (+) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (-) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (*) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (/) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (*@) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val (**) : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val add : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sub : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val mul : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val div : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val kron : t -> t -> t

Refer to :doc:`owl_dense_matrix_generic`

val dot : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val pow : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atan2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val min2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max2 : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cross_entropy : t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val inv : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val neg : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val abs : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val signum : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val floor : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val ceil : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val round : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sqr : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sqrt : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log2 : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log10 : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val exp : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sin : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cos : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tan : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sinh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val cosh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tanh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val asin : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val acos : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atan : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val asinh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val acosh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val atanh : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val log_sum_exp : ?axis:int -> ?keep_dims:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum : ?axis:int -> ?keep_dims:bool -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sum_reduce : ?axis:int array -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val mean : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose : ?axis:int array -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val swap : int -> int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l1norm' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val l2norm_sqr' : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val sigmoid : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val relu : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dawsn : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softplus : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softsign : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val softmax : ?axis:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val reshape : t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val flatten : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_item : t -> int -> int -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_row : t -> int -> t

Refer to :doc:`owl_dense_ndarray_generic`

val concat : axis:int -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val split : axis:int -> int array -> t -> t array

Refer to :doc:`owl_dense_ndarray_generic`

val of_arrays : t array array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val to_arrays : t -> t array array

Refer to :doc:`owl_dense_ndarray_generic`

val concatenate : axis:int -> t array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val stack : axis:int -> t array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_slice : int list list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val set_slice : int list list -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val get_fancy : Owl_types.index list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val set_fancy : Owl_types.index list -> t -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val diag : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val diagm : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val trace : t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val triu : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val tril : ?k:int -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/NN/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/NN/index.html deleted file mode 100644 index 474d16784..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/NN/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -NN (owl.Owl_regression_generic_sig.Sig.Optimise.Algodiff.NN)

Module Algodiff.NN

val dropout : ?rate:float -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv1d : - ?padding:Owl_types.padding -> - t -> - t -> - int array -> - int array -> - t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv2d : - ?padding:Owl_types.padding -> - t -> - t -> - int array -> - int array -> - t

Refer to :doc:`owl_dense_ndarray_generic`

val dilated_conv3d : - ?padding:Owl_types.padding -> - t -> - t -> - int array -> - int array -> - t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv1d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv2d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val transpose_conv3d : ?padding:Owl_types.padding -> t -> t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool1d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool2d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val max_pool3d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool1d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool2d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val avg_pool3d : Owl_types.padding -> t -> int array -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val upsampling2d : t -> int array -> t

Refer to :doc:`owl_dense_ndarray_generic`

val pad : ?v:A.elt -> int list list -> t -> t

Refer to :doc:`owl_dense_ndarray_generic`

\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/index.html deleted file mode 100644 index 10de37e63..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Algodiff/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -Algodiff (owl.Owl_regression_generic_sig.Sig.Optimise.Algodiff)

Module Optimise.Algodiff

include Owl_algodiff_core_sig.Sig
Type definition
include Owl_algodiff_types_sig.Sig with type elt := A.elt and type arr := A.arr
type t =
  1. | F of A.elt
  2. | Arr of A.arr
  3. | DF of t * t * int
  4. | DR of t * t Stdlib.ref * op * int Stdlib.ref * int * int Stdlib.ref
and adjoint = t -> t Stdlib.ref -> (t * t) list -> (t * t) list
and register = t list -> t list
and label = string * t list
and op = adjoint * register * label
Core functions
val tag : unit -> int

TODO

val primal : t -> t

TODO

val primal' : t -> t

TODO

val zero : t -> t

TODO

val reset_zero : t -> t

TODO

val tangent : t -> t

TODO

val adjref : t -> t Stdlib.ref

TODO

val adjval : t -> t

TODO

val shape : t -> int array

TODO

val is_float : t -> bool

TODO

val is_arr : t -> bool

TODO

val row_num : t -> int

number of rows

val col_num : t -> int

number of columns

val numel : t -> int

number of elements

val clip_by_value : amin:A.elt -> amax:A.elt -> t -> t

other functions, without tracking gradient

val clip_by_l2norm : A.elt -> t -> t

other functions, without tracking gradient

val copy_primal' : t -> t

TODO

val tile : t -> int array -> t

TODO

val repeat : t -> int array -> t

TODO

val pack_elt : A.elt -> t

convert from elt type to t type.

val unpack_elt : t -> A.elt

convert from t type to elt type.

val pack_flt : float -> t

convert from float type to t type.

val _f : float -> t

A shortcut function for F A.(float_to_elt x).

val unpack_flt : t -> float

convert from t type to float type.

val pack_arr : A.arr -> t

convert from arr type to t type.

val unpack_arr : t -> A.arr

convert from t type to arr type.

val deep_info : t -> string

TODO

val type_info : t -> string

TODO

val error_binop : string -> t -> t -> 'a

TODO

val error_uniop : string -> t -> 'a

TODO

val make_forward : t -> t -> int -> t

TODO

val make_reverse : t -> int -> t

TODO

val reverse_prop : t -> t -> unit

TODO

val diff : (t -> t) -> t -> t

diff f x returns the exact derivative of a function f : scalar -> scalar at point x. Simply calling diff f will return its derivative function g of the same type, i.e. g : scalar -> scalar.

Calling this function repeatedly gives you higher-order derivatives of f, i.e. f |> diff |> diff |> diff |> ...
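
A minimal sketch of both points, assuming the double-precision instantiation Owl.Algodiff.D (not part of this signature) as the concrete module:

module AD = Owl.Algodiff.D

(* f : scalar -> scalar *)
let f x = AD.Maths.(sin x + sqr x)

let () =
  let x = AD.pack_flt 1.0 in
  (* first- and second-order derivatives of f at x = 1.0 *)
  let d1 = AD.diff f x in
  let d2 = AD.diff (AD.diff f) x in
  Printf.printf "f' = %g, f'' = %g\n" (AD.unpack_flt d1) (AD.unpack_flt d2)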

val diff' : (t -> t) -> t -> t * t

similar to diff, but returns (f x, diff f x).

val grad : (t -> t) -> t -> t

gradient of f : (vector -> scalar) at x, reverse ad.
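
For example, a small sketch (again assuming Owl.Algodiff.D) computing the gradient of the squared L2 norm, which should equal 2x:

module AD = Owl.Algodiff.D

(* f : vector -> scalar *)
let f x = AD.Maths.l2norm_sqr' x

let () =
  let x = AD.pack_arr (Owl.Arr.uniform [| 1; 3 |]) in
  let g = AD.grad f x in            (* reverse-mode gradient *)
  Owl.Arr.print (AD.unpack_arr g)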

val grad' : (t -> t) -> t -> t * t

similar to grad, but returns (f x, grad f x).

val jacobian : (t -> t) -> t -> t

jacobian of f : (vector -> vector) at x; both x and y are row vectors.

val jacobian' : (t -> t) -> t -> t * t

similar to jacobian, but returns (f x, jacobian f x).

val jacobianv : (t -> t) -> t -> t -> t

jacobian vector product of f : (vector -> vector) at x along v, forward ad. Namely, it calculates (jacobian f x) v.

val jacobianv' : (t -> t) -> t -> t -> t * t

similar to jacobianv, but returns (f x, jacobianv f x v).

val jacobianTv : (t -> t) -> t -> t -> t

transposed jacobian vector product of f : (vector -> vector) at x along v, backward ad. Namely, it calculates (transpose (jacobian f x)) v.
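
The whole Jacobian family can be exercised together; a sketch assuming Owl.Algodiff.D, with x and v as 1 x 3 row vectors:

module AD = Owl.Algodiff.D

(* f : vector -> vector, applied element-wise *)
let f x = AD.Maths.(sin x * x)

let () =
  let x = AD.pack_arr (Owl.Arr.uniform [| 1; 3 |]) in
  let v = AD.pack_arr (Owl.Arr.ones [| 1; 3 |]) in
  let j   = AD.jacobian f x in        (* full 3 x 3 Jacobian *)
  let jv  = AD.jacobianv f x v in     (* (jacobian f x) v, forward mode *)
  let jtv = AD.jacobianTv f x v in    (* (transpose (jacobian f x)) v, reverse mode *)
  List.iter (fun t -> Owl.Arr.print (AD.unpack_arr t)) [ j; jv; jtv ]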

val jacobianTv' : (t -> t) -> t -> t -> t * t

similar to jacobianTv, but returns (f x, jacobianTv f x v).

val hessian : (t -> t) -> t -> t

hessian of f : (scalar -> scalar) at x.

val hessian' : (t -> t) -> t -> t * t

similar to hessian, but returns (f x, hessian f x).

val hessianv : (t -> t) -> t -> t -> t

hessian vector product of f : (scalar -> scalar) at x along v. Namely, it calculates (hessian f x) v.

val hessianv' : (t -> t) -> t -> t -> t * t

similar to hessianv, but returns (f x, hessianv f x v).

val laplacian : (t -> t) -> t -> t

laplacian of f : (scalar -> scalar) at x.

val laplacian' : (t -> t) -> t -> t * t

similar to laplacian, but returns (f x, laplacian f x).

val gradhessian : (t -> t) -> t -> t * t

returns (grad f x, hessian f x), for f : (scalar -> scalar)

val gradhessian' : (t -> t) -> t -> t * t * t

returns (f x, grad f x, hessian f x)

val gradhessianv : (t -> t) -> t -> t -> t * t

returns (grad f x v, hessian f x v)

val gradhessianv' : (t -> t) -> t -> t -> t * t * t

returns (f x, grad f x v, hessian f x v)

include Owl_algodiff_ops_sig.Sig - with type t := t - and type elt := A.elt - and type arr := A.arr - and type op := op
module Builder : - Owl_algodiff_ops_builder_sig.Sig - with type t := t - and type elt := A.elt - and type arr := A.arr - and type op := op
Supported Maths functions
module Maths : sig ... end
Supported Linalg functions
module Linalg : sig ... end
Supported Neural Network functions
module NN : sig ... end
Supported Mat functions
module Mat : sig ... end
Supported Arr functions
module Arr : sig ... end
Helper functions
include Owl_algodiff_graph_convert_sig.Sig with type t := t
val to_trace : t list -> string

to_trace [t0; t1; ...] returns the trace of the computation graph in a human-readable string format.

val to_dot : t list -> string

to_dot [t0; t1; ...] returns the trace of the computation graph in the dot file format, which you can pass to other tools such as Graphviz for further visualisation.
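
For instance, a sketch (assuming Owl.Algodiff.D) that tags an input for reverse mode, evaluates a small expression, and dumps its graph both as a trace and as a dot file:

module AD = Owl.Algodiff.D

let () =
  let x = AD.make_reverse (AD.pack_flt 2.) (AD.tag ()) in
  let y = AD.Maths.(sin x + sqr x) in
  print_endline (AD.to_trace [ y ]);
  (* render the dot file afterwards with e.g. dot -Tpdf graph.dot *)
  let oc = open_out "graph.dot" in
  output_string oc (AD.to_dot [ y ]);
  close_out oc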

val pp_num : Stdlib.Format.formatter -> t -> unit

pp_num t pretty prints the abstract number used in Algodiff.

\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Batch/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Batch/index.html deleted file mode 100644 index bd94d7c1a..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Batch/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Batch (owl.Owl_regression_generic_sig.Sig.Optimise.Batch)

Module Optimise.Batch

Batch module

type typ =
  1. | Full
  2. | Mini of int
  3. | Sample of int
  4. | Stochastic

Types of batches.

val run : typ -> Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t

Execute the computations defined in module typ.

val batches : typ -> Algodiff.t -> int

Return the total number of batches given a batch typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Checkpoint/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Checkpoint/index.html deleted file mode 100644 index fef6a6452..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Checkpoint/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Checkpoint (owl.Owl_regression_generic_sig.Sig.Optimise.Checkpoint)

Module Optimise.Checkpoint

Checkpoint module

type state = {
  1. mutable current_batch : int;
  2. mutable batches_per_epoch : int;
  3. mutable epochs : float;
  4. mutable batches : int;
  5. mutable loss : Algodiff.t array;
  6. mutable start_at : float;
  7. mutable stop : bool;
  8. mutable gs : Algodiff.t array array;
  9. mutable ps : Algodiff.t array array;
  10. mutable us : Algodiff.t array array;
  11. mutable ch : Algodiff.t array array array;
}

Type definition of checkpoint

type typ =
  1. | Batch of int
  2. | Epoch of float
  3. | Custom of state -> unit
  4. | None

Types of checkpoints.

val init_state : int -> float -> state

init_state batches_per_epoch epochs initialises a state by specifying the number of batches per epoch and the number of epochs in total.

val default_checkpoint_fun : (string -> 'a) -> 'a

This function is used for saving intermediate files during optimisation.

val print_state_info : state -> unit

Print out detailed information about the current state.

val print_summary : state -> unit

Print out a summary of the current state.

val run : typ -> (string -> unit) -> int -> Algodiff.t -> state -> unit

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Clipping/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Clipping/index.html deleted file mode 100644 index 0214a2249..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Clipping/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Clipping (owl.Owl_regression_generic_sig.Sig.Optimise.Clipping)

Module Optimise.Clipping

Clipping module

type typ =
  1. | L2norm of float
  2. | Value of float * float
  3. | None

Types of clipping functions.

val run : typ -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Gradient/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Gradient/index.html deleted file mode 100644 index 2a8aba6c2..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Gradient/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -Gradient (owl.Owl_regression_generic_sig.Sig.Optimise.Gradient)

Module Optimise.Gradient

Gradient module

type typ =
  1. | GD
  2. | CG
  3. | CD
  4. | NonlinearCG
  5. | DaiYuanCG
  6. | NewtonCG
  7. | Newton

Types of gradient function.

val run : - typ -> - (Algodiff.t -> Algodiff.t) -> - Algodiff.t -> - Algodiff.t -> - Algodiff.t -> - Algodiff.t -> - Algodiff.t

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Learning_Rate/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Learning_Rate/index.html deleted file mode 100644 index efedd5395..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Learning_Rate/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Learning_Rate (owl.Owl_regression_generic_sig.Sig.Optimise.Learning_Rate)

Module Optimise.Learning_Rate

Strategies for learning rate update

type typ =
  1. | Adagrad of float
  2. | Const of float
  3. | Decay of float * float
  4. | Exp_decay of float * float
  5. | RMSprop of float * float
  6. | Adam of float * float * float
  7. | Schedule of float array

Representation of learning rate update strategies. Possible values include:

  • Adam (alpha, beta1, beta2); see the referenced paper for the meaning of the parameters
val run : typ -> int -> Algodiff.t -> Algodiff.t array -> Algodiff.t

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val update_ch : typ -> Algodiff.t -> Algodiff.t array -> Algodiff.t array

Update the cache of gradients.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Loss/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Loss/index.html deleted file mode 100644 index 8161f4e24..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Loss/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Loss (owl.Owl_regression_generic_sig.Sig.Optimise.Loss)

Module Optimise.Loss

Loss module

type typ =
  1. | Hinge
  2. | L1norm
  3. | L2norm
  4. | Quadratic
  5. | Cross_entropy
  6. | Custom of Algodiff.t -> Algodiff.t -> Algodiff.t

Types of loss functions.
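
For example, Custom wraps any Algodiff.t -> Algodiff.t -> Algodiff.t function. A sketch, assuming the double-precision instantiation is available as Owl.Optimise.D, that rebuilds a quadratic-style loss:

module O = Owl.Optimise.D           (* assumed module path *)
module AD = O.Algodiff

let my_loss = O.Loss.Custom (fun y y' -> AD.Maths.(l2norm_sqr' (y - y')))

let () =
  let y  = AD.pack_arr (Owl.Arr.uniform [| 4; 1 |]) in
  let y' = AD.pack_arr (Owl.Arr.zeros [| 4; 1 |]) in
  Printf.printf "loss = %g\n" (AD.unpack_flt (O.Loss.run my_loss y y'))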

val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Momentum/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Momentum/index.html deleted file mode 100644 index ff0343613..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Momentum/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Momentum (owl.Owl_regression_generic_sig.Sig.Optimise.Momentum)

Module Optimise.Momentum

Momentum module

type typ =
  1. | Standard of float
  2. | Nesterov of float
  3. | None

Types of momentum functions.

val run : typ -> Algodiff.t -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Params/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Params/index.html deleted file mode 100644 index 9451fbc87..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Params/index.html +++ /dev/null @@ -1,14 +0,0 @@ - -Params (owl.Owl_regression_generic_sig.Sig.Optimise.Params)

Module Optimise.Params

Params module

type typ = {
  1. mutable epochs : float;
  2. mutable batch : Batch.typ;
  3. mutable gradient : Gradient.typ;
  4. mutable loss : Loss.typ;
  5. mutable learning_rate : Learning_Rate.typ;
  6. mutable regularisation : Regularisation.typ;
  7. mutable momentum : Momentum.typ;
  8. mutable clipping : Clipping.typ;
  9. mutable stopping : Stopping.typ;
  10. mutable checkpoint : Checkpoint.typ;
  11. mutable verbosity : bool;
}

Type definition of parameter.

val default : unit -> typ

Create module typ with default values.

val config : - ?batch:Batch.typ -> - ?gradient:Gradient.typ -> - ?loss:Loss.typ -> - ?learning_rate:Learning_Rate.typ -> - ?regularisation:Regularisation.typ -> - ?momentum:Momentum.typ -> - ?clipping:Clipping.typ -> - ?stopping:Stopping.typ -> - ?checkpoint:Checkpoint.typ -> - ?verbosity:bool -> - float -> - typ

This function creates a parameter object with many configurations.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Regularisation/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Regularisation/index.html deleted file mode 100644 index 829cf932b..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Regularisation/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Regularisation (owl.Owl_regression_generic_sig.Sig.Optimise.Regularisation)

Module Optimise.Regularisation

Regularisation module

type typ =
  1. | L1norm of float
  2. | L2norm of float
  3. | Elastic_net of float * float
  4. | None

Types of regularisation functions.

val run : typ -> Algodiff.t -> Algodiff.t

Execute the computations defined in module typ.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Stopping/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Stopping/index.html deleted file mode 100644 index f9746c9c8..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Stopping/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Stopping (owl.Owl_regression_generic_sig.Sig.Optimise.Stopping)

Module Optimise.Stopping

Stopping module

type typ =
  1. | Const of float
  2. | Early of int * int
  3. | None

Types of stopping functions.

val run : typ -> float -> bool

Execute the computations defined in module typ.

val default : typ -> typ

Create module typ with default values.

val to_string : typ -> string

Convert the module typ to its string representation.

\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Utils/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Utils/index.html deleted file mode 100644 index 078c01de9..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/Utils/index.html +++ /dev/null @@ -1,7 +0,0 @@ - -Utils (owl.Owl_regression_generic_sig.Sig.Optimise.Utils)

Module Optimise.Utils

Utils module

val sample_num : Algodiff.t -> int

Return the total number of samples in the passed-in ndarray.

val draw_samples : Algodiff.t -> Algodiff.t -> int -> Algodiff.t * Algodiff.t

draw_samples x y draws samples from both x (observations) and y (labels). The samples are drawn along axis 0, so x and y must agree along axis 0.

val get_chunk : - Algodiff.t -> - Algodiff.t -> - int -> - int -> - Algodiff.t * Algodiff.t

get_chunk x y i c gets a contiguous chunk of c samples starting at position i from x (observations) and y (labels).
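
A small usage sketch, assuming Owl.Optimise.D as the concrete instantiation:

module O = Owl.Optimise.D
module AD = O.Algodiff

let () =
  let x = AD.pack_arr (Owl.Arr.uniform [| 100; 3 |]) in    (* 100 observations *)
  let y = AD.pack_arr (Owl.Arr.uniform [| 100; 1 |]) in    (* 100 labels *)
  Printf.printf "samples = %i\n" (O.Utils.sample_num x);
  let _xb, _yb = O.Utils.draw_samples x y 16 in            (* a random mini-batch of 16 *)
  let _xc, _yc = O.Utils.get_chunk x y 10 16 in            (* the 16 samples starting at row 10 *)
  ()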

\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/index.html deleted file mode 100644 index ccd0881e6..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/Optimise/index.html +++ /dev/null @@ -1,31 +0,0 @@ - -Optimise (owl.Owl_regression_generic_sig.Sig.Optimise)

Module Sig.Optimise

module Utils : sig ... end

Utils module

module Learning_Rate : sig ... end

Strategies for learning rate update

module Batch : sig ... end

Batch module

module Loss : sig ... end

Loss module

module Gradient : sig ... end

Gradient module

module Momentum : sig ... end

Momentum module

module Regularisation : sig ... end

Regularisation module

module Clipping : sig ... end

Clipping module

module Stopping : sig ... end

Stopping module

module Checkpoint : sig ... end

Checkpoint module

module Params : sig ... end

Params module

Core functions
val minimise_weight : - ?state:Checkpoint.state -> - Params.typ -> - (Algodiff.t -> Algodiff.t -> Algodiff.t) -> - Algodiff.t -> - Algodiff.t -> - Algodiff.t -> - Checkpoint.state * Algodiff.t

This function minimises the weight w of the passed-in function f.

  • f is a function f : w -> x -> y.
  • w is a row vector but y can have any shape.
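
A rough end-to-end sketch of these conventions, assuming Owl.Optimise.D (the data, model, and hyper-parameters below are made up for illustration):

module O = Owl.Optimise.D
module AD = O.Algodiff

(* f : w -> x -> y, where w is a 1 x 3 row vector of coefficients *)
let f w x = AD.Maths.(x *@ transpose w)

let () =
  let x = AD.pack_arr (Owl.Arr.uniform [| 50; 3 |]) in
  let w_true = AD.pack_arr (Owl.Arr.of_array [| 1.; 2.; 3. |] [| 1; 3 |]) in
  let y = f w_true x in
  let w0 = AD.pack_arr (Owl.Arr.zeros [| 1; 3 |]) in
  let params =
    O.Params.config ~loss:O.Loss.Quadratic
      ~learning_rate:(O.Learning_Rate.Const 0.1) 200.
  in
  let _state, w_hat = O.minimise_weight params f w0 x y in
  Owl.Arr.print (AD.unpack_arr w_hat)   (* should approach w_true *)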

val minimise_network : - ?state:Checkpoint.state -> - Params.typ -> - (Algodiff.t -> Algodiff.t * Algodiff.t array array) -> - (Algodiff.t -> Algodiff.t array array * Algodiff.t array array) -> - (Algodiff.t array array -> unit) -> - (string -> unit) -> - Algodiff.t -> - Algodiff.t -> - Checkpoint.state

This function is specifically designed for minimising the weights in a neural network of graph structure. In Owl's earlier versions, the functions in the regression module were actually implemented using this function.

val minimise_fun : - ?state:Checkpoint.state -> - Params.typ -> - (Algodiff.t -> Algodiff.t) -> - Algodiff.t -> - Checkpoint.state * Algodiff.t

This function minimises f : x -> y w.r.t x.

x is an ndarray, and y is a scalar value.
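
For example, a sketch (assuming Owl.Optimise.D) minimising the squared distance to a fixed target vector:

module O = Owl.Optimise.D
module AD = O.Algodiff

(* f : x -> scalar *)
let f x =
  let target = AD.pack_arr (Owl.Arr.of_array [| 1.; 2.; 3. |] [| 1; 3 |]) in
  AD.Maths.(l2norm_sqr' (x - target))

let () =
  let x0 = AD.pack_arr (Owl.Arr.zeros [| 1; 3 |]) in
  let params = O.Params.config ~learning_rate:(O.Learning_Rate.Const 0.2) 50. in
  let _state, x_min = O.minimise_fun params f x0 in
  Owl.Arr.print (AD.unpack_arr x_min)   (* should approach the target *)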

val minimise_compiled_network : - ?state:Checkpoint.state -> - Params.typ -> - (Algodiff.t -> Algodiff.t -> Algodiff.t) -> - (unit -> unit) -> - (string -> unit) -> - Algodiff.t -> - Algodiff.t -> - Checkpoint.state

TODO

\ No newline at end of file diff --git a/owl/Owl_regression_generic_sig/module-type-Sig/index.html b/owl/Owl_regression_generic_sig/module-type-Sig/index.html deleted file mode 100644 index ff803f302..000000000 --- a/owl/Owl_regression_generic_sig/module-type-Sig/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -Sig (owl.Owl_regression_generic_sig.Sig)

Module type Owl_regression_generic_sig.Sig

Type definition

Type of ndarray values.

Type of scalar values.

Regression models
val ols : ?i:bool -> arr -> arr -> arr array

TODO

val ridge : ?i:bool -> ?alpha:float -> arr -> arr -> arr array

TODO

val lasso : ?i:bool -> ?alpha:float -> arr -> arr -> arr array

TODO

val elastic_net : - ?i:bool -> - ?alpha:float -> - ?l1_ratio:float -> - arr -> - arr -> - arr array

TODO

val svm : ?i:bool -> ?a:float -> arr -> arr -> arr array

TODO

val logistic : ?i:bool -> arr -> arr -> arr array

TODO

val exponential : ?i:bool -> arr -> arr -> elt * elt * elt

TODO

val poly : arr -> arr -> int -> arr

TODO

\ No newline at end of file diff --git a/owl/Owl_signal/.dummy b/owl/Owl_signal/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_slicing/.dummy b/owl/Owl_slicing/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_slicing_basic/.dummy b/owl/Owl_slicing_basic/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_slicing_fancy/.dummy b/owl/Owl_slicing_fancy/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_stats/.dummy b/owl/Owl_stats/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_stats_dist/.dummy b/owl/Owl_stats_dist/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_stats_extend/.dummy b/owl/Owl_stats_extend/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_stats_prng/.dummy b/owl/Owl_stats_prng/.dummy deleted file mode 100644 index e69de29bb..000000000 diff --git a/owl/Owl_stats_sampler/.dummy b/owl/Owl_stats_sampler/.dummy deleted file mode 100644 index e69de29bb..000000000