diff --git a/.nojekyll b/.nojekyll
new file mode 100644
index 00000000..e69de29b
diff --git a/404.html b/404.html
new file mode 100644
index 00000000..2a9ccf0f
--- /dev/null
+++ b/404.html
@@ -0,0 +1,2399 @@
[2,399 added lines of generated MkDocs (Material theme) markup omitted — page title "Research Project Template (preview)", body text "404 - Not found"]
\ No newline at end of file
diff --git a/SUMMARY/index.html b/SUMMARY/index.html
new file mode 100644
index 00000000..70a11b6a
--- /dev/null
+++ b/SUMMARY/index.html
@@ -0,0 +1,2447 @@
[2,447 added lines of generated MkDocs (Material theme) markup omitted — page title "SUMMARY - Research Project Template (preview)"]
+ + + + + + + + + + \ No newline at end of file diff --git a/__pycache__/macros.cpython-310.pyc b/__pycache__/macros.cpython-310.pyc new file mode 100644 index 00000000..59d1a7ef Binary files /dev/null and b/__pycache__/macros.cpython-310.pyc differ diff --git a/assets/_mkdocstrings.css b/assets/_mkdocstrings.css new file mode 100644 index 00000000..b500381b --- /dev/null +++ b/assets/_mkdocstrings.css @@ -0,0 +1,143 @@ + +/* Avoid breaking parameter names, etc. in table cells. */ +.doc-contents td code { + word-break: normal !important; +} + +/* No line break before first paragraph of descriptions. */ +.doc-md-description, +.doc-md-description>p:first-child { + display: inline; +} + +/* Max width for docstring sections tables. */ +.doc .md-typeset__table, +.doc .md-typeset__table table { + display: table !important; + width: 100%; +} + +.doc .md-typeset__table tr { + display: table-row; +} + +/* Defaults in Spacy table style. */ +.doc-param-default { + float: right; +} + +/* Parameter headings must be inline, not blocks. */ +.doc-heading-parameter { + display: inline; +} + +/* Prefer space on the right, not the left of parameter permalinks. */ +.doc-heading-parameter .headerlink { + margin-left: 0 !important; + margin-right: 0.2rem; +} + +/* Backward-compatibility: docstring section titles in bold. */ +.doc-section-title { + font-weight: bold; +} + +/* Symbols in Navigation and ToC. */ +:root, :host, +[data-md-color-scheme="default"] { + --doc-symbol-parameter-fg-color: #df50af; + --doc-symbol-attribute-fg-color: #953800; + --doc-symbol-function-fg-color: #8250df; + --doc-symbol-method-fg-color: #8250df; + --doc-symbol-class-fg-color: #0550ae; + --doc-symbol-module-fg-color: #5cad0f; + + --doc-symbol-parameter-bg-color: #df50af1a; + --doc-symbol-attribute-bg-color: #9538001a; + --doc-symbol-function-bg-color: #8250df1a; + --doc-symbol-method-bg-color: #8250df1a; + --doc-symbol-class-bg-color: #0550ae1a; + --doc-symbol-module-bg-color: #5cad0f1a; +} + +[data-md-color-scheme="slate"] { + --doc-symbol-parameter-fg-color: #ffa8cc; + --doc-symbol-attribute-fg-color: #ffa657; + --doc-symbol-function-fg-color: #d2a8ff; + --doc-symbol-method-fg-color: #d2a8ff; + --doc-symbol-class-fg-color: #79c0ff; + --doc-symbol-module-fg-color: #baff79; + + --doc-symbol-parameter-bg-color: #ffa8cc1a; + --doc-symbol-attribute-bg-color: #ffa6571a; + --doc-symbol-function-bg-color: #d2a8ff1a; + --doc-symbol-method-bg-color: #d2a8ff1a; + --doc-symbol-class-bg-color: #79c0ff1a; + --doc-symbol-module-bg-color: #baff791a; +} + +code.doc-symbol { + border-radius: .1rem; + font-size: .85em; + padding: 0 .3em; + font-weight: bold; +} + +code.doc-symbol-parameter { + color: var(--doc-symbol-parameter-fg-color); + background-color: var(--doc-symbol-parameter-bg-color); +} + +code.doc-symbol-parameter::after { + content: "param"; +} + +code.doc-symbol-attribute { + color: var(--doc-symbol-attribute-fg-color); + background-color: var(--doc-symbol-attribute-bg-color); +} + +code.doc-symbol-attribute::after { + content: "attr"; +} + +code.doc-symbol-function { + color: var(--doc-symbol-function-fg-color); + background-color: var(--doc-symbol-function-bg-color); +} + +code.doc-symbol-function::after { + content: "func"; +} + +code.doc-symbol-method { + color: var(--doc-symbol-method-fg-color); + background-color: var(--doc-symbol-method-bg-color); +} + +code.doc-symbol-method::after { + content: "meth"; +} + +code.doc-symbol-class { + color: var(--doc-symbol-class-fg-color); + background-color: 
var(--doc-symbol-class-bg-color); +} + +code.doc-symbol-class::after { + content: "class"; +} + +code.doc-symbol-module { + color: var(--doc-symbol-module-fg-color); + background-color: var(--doc-symbol-module-bg-color); +} + +code.doc-symbol-module::after { + content: "mod"; +} + +.doc-signature .autorefs { + color: inherit; + border-bottom: 1px dotted currentcolor; +} diff --git a/assets/images/favicon.png b/assets/images/favicon.png new file mode 100644 index 00000000..1cf13b9f Binary files /dev/null and b/assets/images/favicon.png differ diff --git a/assets/javascripts/bundle.83f73b43.min.js b/assets/javascripts/bundle.83f73b43.min.js new file mode 100644 index 00000000..43d8b70f --- /dev/null +++ b/assets/javascripts/bundle.83f73b43.min.js @@ -0,0 +1,16 @@ +"use strict";(()=>{var Wi=Object.create;var gr=Object.defineProperty;var Di=Object.getOwnPropertyDescriptor;var Vi=Object.getOwnPropertyNames,Vt=Object.getOwnPropertySymbols,Ni=Object.getPrototypeOf,yr=Object.prototype.hasOwnProperty,ao=Object.prototype.propertyIsEnumerable;var io=(e,t,r)=>t in e?gr(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,$=(e,t)=>{for(var r in t||(t={}))yr.call(t,r)&&io(e,r,t[r]);if(Vt)for(var r of Vt(t))ao.call(t,r)&&io(e,r,t[r]);return e};var so=(e,t)=>{var r={};for(var o in e)yr.call(e,o)&&t.indexOf(o)<0&&(r[o]=e[o]);if(e!=null&&Vt)for(var o of Vt(e))t.indexOf(o)<0&&ao.call(e,o)&&(r[o]=e[o]);return r};var xr=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var zi=(e,t,r,o)=>{if(t&&typeof t=="object"||typeof t=="function")for(let n of Vi(t))!yr.call(e,n)&&n!==r&&gr(e,n,{get:()=>t[n],enumerable:!(o=Di(t,n))||o.enumerable});return e};var Mt=(e,t,r)=>(r=e!=null?Wi(Ni(e)):{},zi(t||!e||!e.__esModule?gr(r,"default",{value:e,enumerable:!0}):r,e));var co=(e,t,r)=>new Promise((o,n)=>{var i=p=>{try{s(r.next(p))}catch(c){n(c)}},a=p=>{try{s(r.throw(p))}catch(c){n(c)}},s=p=>p.done?o(p.value):Promise.resolve(p.value).then(i,a);s((r=r.apply(e,t)).next())});var lo=xr((Er,po)=>{(function(e,t){typeof Er=="object"&&typeof po!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(Er,function(){"use strict";function e(r){var o=!0,n=!1,i=null,a={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function s(k){return!!(k&&k!==document&&k.nodeName!=="HTML"&&k.nodeName!=="BODY"&&"classList"in k&&"contains"in k.classList)}function p(k){var ft=k.type,qe=k.tagName;return!!(qe==="INPUT"&&a[ft]&&!k.readOnly||qe==="TEXTAREA"&&!k.readOnly||k.isContentEditable)}function c(k){k.classList.contains("focus-visible")||(k.classList.add("focus-visible"),k.setAttribute("data-focus-visible-added",""))}function l(k){k.hasAttribute("data-focus-visible-added")&&(k.classList.remove("focus-visible"),k.removeAttribute("data-focus-visible-added"))}function f(k){k.metaKey||k.altKey||k.ctrlKey||(s(r.activeElement)&&c(r.activeElement),o=!0)}function u(k){o=!1}function d(k){s(k.target)&&(o||p(k.target))&&c(k.target)}function y(k){s(k.target)&&(k.target.classList.contains("focus-visible")||k.target.hasAttribute("data-focus-visible-added"))&&(n=!0,window.clearTimeout(i),i=window.setTimeout(function(){n=!1},100),l(k.target))}function L(k){document.visibilityState==="hidden"&&(n&&(o=!0),X())}function 
X(){document.addEventListener("mousemove",J),document.addEventListener("mousedown",J),document.addEventListener("mouseup",J),document.addEventListener("pointermove",J),document.addEventListener("pointerdown",J),document.addEventListener("pointerup",J),document.addEventListener("touchmove",J),document.addEventListener("touchstart",J),document.addEventListener("touchend",J)}function te(){document.removeEventListener("mousemove",J),document.removeEventListener("mousedown",J),document.removeEventListener("mouseup",J),document.removeEventListener("pointermove",J),document.removeEventListener("pointerdown",J),document.removeEventListener("pointerup",J),document.removeEventListener("touchmove",J),document.removeEventListener("touchstart",J),document.removeEventListener("touchend",J)}function J(k){k.target.nodeName&&k.target.nodeName.toLowerCase()==="html"||(o=!1,te())}document.addEventListener("keydown",f,!0),document.addEventListener("mousedown",u,!0),document.addEventListener("pointerdown",u,!0),document.addEventListener("touchstart",u,!0),document.addEventListener("visibilitychange",L,!0),X(),r.addEventListener("focus",d,!0),r.addEventListener("blur",y,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var qr=xr((hy,On)=>{"use strict";/*! + * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */var $a=/["'&<>]/;On.exports=Pa;function Pa(e){var t=""+e,r=$a.exec(t);if(!r)return t;var o,n="",i=0,a=0;for(i=r.index;i{/*! 
+ * clipboard.js v2.0.11 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */(function(t,r){typeof It=="object"&&typeof Yr=="object"?Yr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof It=="object"?It.ClipboardJS=r():t.ClipboardJS=r()})(It,function(){return function(){var e={686:function(o,n,i){"use strict";i.d(n,{default:function(){return Ui}});var a=i(279),s=i.n(a),p=i(370),c=i.n(p),l=i(817),f=i.n(l);function u(V){try{return document.execCommand(V)}catch(A){return!1}}var d=function(A){var M=f()(A);return u("cut"),M},y=d;function L(V){var A=document.documentElement.getAttribute("dir")==="rtl",M=document.createElement("textarea");M.style.fontSize="12pt",M.style.border="0",M.style.padding="0",M.style.margin="0",M.style.position="absolute",M.style[A?"right":"left"]="-9999px";var F=window.pageYOffset||document.documentElement.scrollTop;return M.style.top="".concat(F,"px"),M.setAttribute("readonly",""),M.value=V,M}var X=function(A,M){var F=L(A);M.container.appendChild(F);var D=f()(F);return u("copy"),F.remove(),D},te=function(A){var M=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},F="";return typeof A=="string"?F=X(A,M):A instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(A==null?void 0:A.type)?F=X(A.value,M):(F=f()(A),u("copy")),F},J=te;function k(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?k=function(M){return typeof M}:k=function(M){return M&&typeof Symbol=="function"&&M.constructor===Symbol&&M!==Symbol.prototype?"symbol":typeof M},k(V)}var ft=function(){var A=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},M=A.action,F=M===void 0?"copy":M,D=A.container,Y=A.target,$e=A.text;if(F!=="copy"&&F!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(Y!==void 0)if(Y&&k(Y)==="object"&&Y.nodeType===1){if(F==="copy"&&Y.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. Please use "readonly" instead of "disabled" attribute');if(F==="cut"&&(Y.hasAttribute("readonly")||Y.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. 
You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if($e)return J($e,{container:D});if(Y)return F==="cut"?y(Y):J(Y,{container:D})},qe=ft;function Fe(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?Fe=function(M){return typeof M}:Fe=function(M){return M&&typeof Symbol=="function"&&M.constructor===Symbol&&M!==Symbol.prototype?"symbol":typeof M},Fe(V)}function ki(V,A){if(!(V instanceof A))throw new TypeError("Cannot call a class as a function")}function no(V,A){for(var M=0;M0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof D.action=="function"?D.action:this.defaultAction,this.target=typeof D.target=="function"?D.target:this.defaultTarget,this.text=typeof D.text=="function"?D.text:this.defaultText,this.container=Fe(D.container)==="object"?D.container:document.body}},{key:"listenClick",value:function(D){var Y=this;this.listener=c()(D,"click",function($e){return Y.onClick($e)})}},{key:"onClick",value:function(D){var Y=D.delegateTarget||D.currentTarget,$e=this.action(Y)||"copy",Dt=qe({action:$e,container:this.container,target:this.target(Y),text:this.text(Y)});this.emit(Dt?"success":"error",{action:$e,text:Dt,trigger:Y,clearSelection:function(){Y&&Y.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(D){return vr("action",D)}},{key:"defaultTarget",value:function(D){var Y=vr("target",D);if(Y)return document.querySelector(Y)}},{key:"defaultText",value:function(D){return vr("text",D)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(D){var Y=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return J(D,Y)}},{key:"cut",value:function(D){return y(D)}},{key:"isSupported",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],Y=typeof D=="string"?[D]:D,$e=!!document.queryCommandSupported;return Y.forEach(function(Dt){$e=$e&&!!document.queryCommandSupported(Dt)}),$e}}]),M}(s()),Ui=Fi},828:function(o){var n=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function a(s,p){for(;s&&s.nodeType!==n;){if(typeof s.matches=="function"&&s.matches(p))return s;s=s.parentNode}}o.exports=a},438:function(o,n,i){var a=i(828);function s(l,f,u,d,y){var L=c.apply(this,arguments);return l.addEventListener(u,L,y),{destroy:function(){l.removeEventListener(u,L,y)}}}function p(l,f,u,d,y){return typeof l.addEventListener=="function"?s.apply(null,arguments):typeof u=="function"?s.bind(null,document).apply(null,arguments):(typeof l=="string"&&(l=document.querySelectorAll(l)),Array.prototype.map.call(l,function(L){return s(L,f,u,d,y)}))}function c(l,f,u,d){return function(y){y.delegateTarget=a(y.target,f),y.delegateTarget&&d.call(l,y)}}o.exports=p},879:function(o,n){n.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},n.nodeList=function(i){var a=Object.prototype.toString.call(i);return i!==void 0&&(a==="[object NodeList]"||a==="[object HTMLCollection]")&&"length"in i&&(i.length===0||n.node(i[0]))},n.string=function(i){return typeof i=="string"||i instanceof String},n.fn=function(i){var a=Object.prototype.toString.call(i);return a==="[object Function]"}},370:function(o,n,i){var a=i(879),s=i(438);function p(u,d,y){if(!u&&!d&&!y)throw new Error("Missing required 
arguments");if(!a.string(d))throw new TypeError("Second argument must be a String");if(!a.fn(y))throw new TypeError("Third argument must be a Function");if(a.node(u))return c(u,d,y);if(a.nodeList(u))return l(u,d,y);if(a.string(u))return f(u,d,y);throw new TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function c(u,d,y){return u.addEventListener(d,y),{destroy:function(){u.removeEventListener(d,y)}}}function l(u,d,y){return Array.prototype.forEach.call(u,function(L){L.addEventListener(d,y)}),{destroy:function(){Array.prototype.forEach.call(u,function(L){L.removeEventListener(d,y)})}}}function f(u,d,y){return s(document.body,u,d,y)}o.exports=p},817:function(o){function n(i){var a;if(i.nodeName==="SELECT")i.focus(),a=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var s=i.hasAttribute("readonly");s||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),s||i.removeAttribute("readonly"),a=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var p=window.getSelection(),c=document.createRange();c.selectNodeContents(i),p.removeAllRanges(),p.addRange(c),a=p.toString()}return a}o.exports=n},279:function(o){function n(){}n.prototype={on:function(i,a,s){var p=this.e||(this.e={});return(p[i]||(p[i]=[])).push({fn:a,ctx:s}),this},once:function(i,a,s){var p=this;function c(){p.off(i,c),a.apply(s,arguments)}return c._=a,this.on(i,c,s)},emit:function(i){var a=[].slice.call(arguments,1),s=((this.e||(this.e={}))[i]||[]).slice(),p=0,c=s.length;for(p;p0&&i[i.length-1])&&(c[0]===6||c[0]===2)){r=0;continue}if(c[0]===3&&(!i||c[1]>i[0]&&c[1]=e.length&&(e=void 0),{value:e&&e[o++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function N(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var o=r.call(e),n,i=[],a;try{for(;(t===void 0||t-- >0)&&!(n=o.next()).done;)i.push(n.value)}catch(s){a={error:s}}finally{try{n&&!n.done&&(r=o.return)&&r.call(o)}finally{if(a)throw a.error}}return i}function q(e,t,r){if(r||arguments.length===2)for(var o=0,n=t.length,i;o1||p(d,L)})},y&&(n[d]=y(n[d])))}function p(d,y){try{c(o[d](y))}catch(L){u(i[0][3],L)}}function c(d){d.value instanceof nt?Promise.resolve(d.value.v).then(l,f):u(i[0][2],d)}function l(d){p("next",d)}function f(d){p("throw",d)}function u(d,y){d(y),i.shift(),i.length&&p(i[0][0],i[0][1])}}function uo(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof he=="function"?he(e):e[Symbol.iterator](),r={},o("next"),o("throw"),o("return"),r[Symbol.asyncIterator]=function(){return this},r);function o(i){r[i]=e[i]&&function(a){return new Promise(function(s,p){a=e[i](a),n(s,p,a.done,a.value)})}}function n(i,a,s,p){Promise.resolve(p).then(function(c){i({value:c,done:s})},a)}}function H(e){return typeof e=="function"}function ut(e){var t=function(o){Error.call(o),o.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var zt=ut(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription: +`+r.map(function(o,n){return n+1+") "+o.toString()}).join(` + `):"",this.name="UnsubscriptionError",this.errors=r}});function Qe(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var Ue=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var 
t,r,o,n,i;if(!this.closed){this.closed=!0;var a=this._parentage;if(a)if(this._parentage=null,Array.isArray(a))try{for(var s=he(a),p=s.next();!p.done;p=s.next()){var c=p.value;c.remove(this)}}catch(L){t={error:L}}finally{try{p&&!p.done&&(r=s.return)&&r.call(s)}finally{if(t)throw t.error}}else a.remove(this);var l=this.initialTeardown;if(H(l))try{l()}catch(L){i=L instanceof zt?L.errors:[L]}var f=this._finalizers;if(f){this._finalizers=null;try{for(var u=he(f),d=u.next();!d.done;d=u.next()){var y=d.value;try{ho(y)}catch(L){i=i!=null?i:[],L instanceof zt?i=q(q([],N(i)),N(L.errors)):i.push(L)}}}catch(L){o={error:L}}finally{try{d&&!d.done&&(n=u.return)&&n.call(u)}finally{if(o)throw o.error}}}if(i)throw new zt(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)ho(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&Qe(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&Qe(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Tr=Ue.EMPTY;function qt(e){return e instanceof Ue||e&&"closed"in e&&H(e.remove)&&H(e.add)&&H(e.unsubscribe)}function ho(e){H(e)?e():e.unsubscribe()}var Pe={onUnhandledError:null,onStoppedNotification:null,Promise:void 0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var dt={setTimeout:function(e,t){for(var r=[],o=2;o0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var o=this,n=this,i=n.hasError,a=n.isStopped,s=n.observers;return i||a?Tr:(this.currentObservers=null,s.push(r),new Ue(function(){o.currentObservers=null,Qe(s,r)}))},t.prototype._checkFinalizedStatuses=function(r){var o=this,n=o.hasError,i=o.thrownError,a=o.isStopped;n?r.error(i):a&&r.complete()},t.prototype.asObservable=function(){var r=new j;return r.source=this,r},t.create=function(r,o){return new To(r,o)},t}(j);var To=function(e){oe(t,e);function t(r,o){var n=e.call(this)||this;return n.destination=r,n.source=o,n}return t.prototype.next=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.next)===null||n===void 0||n.call(o,r)},t.prototype.error=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.error)===null||n===void 0||n.call(o,r)},t.prototype.complete=function(){var r,o;(o=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||o===void 0||o.call(r)},t.prototype._subscribe=function(r){var o,n;return(n=(o=this.source)===null||o===void 0?void 0:o.subscribe(r))!==null&&n!==void 0?n:Tr},t}(g);var _r=function(e){oe(t,e);function t(r){var o=e.call(this)||this;return o._value=r,o}return Object.defineProperty(t.prototype,"value",{get:function(){return this.getValue()},enumerable:!1,configurable:!0}),t.prototype._subscribe=function(r){var o=e.prototype._subscribe.call(this,r);return!o.closed&&r.next(this._value),o},t.prototype.getValue=function(){var 
r=this,o=r.hasError,n=r.thrownError,i=r._value;if(o)throw n;return this._throwIfClosed(),i},t.prototype.next=function(r){e.prototype.next.call(this,this._value=r)},t}(g);var At={now:function(){return(At.delegate||Date).now()},delegate:void 0};var Ct=function(e){oe(t,e);function t(r,o,n){r===void 0&&(r=1/0),o===void 0&&(o=1/0),n===void 0&&(n=At);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=o,i._timestampProvider=n,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=o===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,o),i}return t.prototype.next=function(r){var o=this,n=o.isStopped,i=o._buffer,a=o._infiniteTimeWindow,s=o._timestampProvider,p=o._windowTime;n||(i.push(r),!a&&i.push(s.now()+p)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var o=this._innerSubscribe(r),n=this,i=n._infiniteTimeWindow,a=n._buffer,s=a.slice(),p=0;p0?e.prototype.schedule.call(this,r,o):(this.delay=o,this.state=r,this.scheduler.flush(this),this)},t.prototype.execute=function(r,o){return o>0||this.closed?e.prototype.execute.call(this,r,o):this._execute(r,o)},t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!=null&&n>0||n==null&&this.delay>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.flush(this),0)},t}(gt);var Lo=function(e){oe(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t}(yt);var kr=new Lo(Oo);var Mo=function(e){oe(t,e);function t(r,o){var n=e.call(this,r,o)||this;return n.scheduler=r,n.work=o,n}return t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!==null&&n>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.actions.push(this),r._scheduled||(r._scheduled=vt.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,o,n){var i;if(n===void 0&&(n=0),n!=null?n>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,o,n);var a=r.actions;o!=null&&((i=a[a.length-1])===null||i===void 0?void 0:i.id)!==o&&(vt.cancelAnimationFrame(o),r._scheduled=void 0)},t}(gt);var _o=function(e){oe(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var o=this._scheduled;this._scheduled=void 0;var n=this.actions,i;r=r||n.shift();do if(i=r.execute(r.state,r.delay))break;while((r=n[0])&&r.id===o&&n.shift());if(this._active=!1,i){for(;(r=n[0])&&r.id===o&&n.shift();)r.unsubscribe();throw i}},t}(yt);var me=new _o(Mo);var S=new j(function(e){return e.complete()});function Yt(e){return e&&H(e.schedule)}function Hr(e){return e[e.length-1]}function Xe(e){return H(Hr(e))?e.pop():void 0}function ke(e){return Yt(Hr(e))?e.pop():void 0}function Bt(e,t){return typeof Hr(e)=="number"?e.pop():t}var xt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function Gt(e){return H(e==null?void 0:e.then)}function Jt(e){return H(e[bt])}function Xt(e){return Symbol.asyncIterator&&H(e==null?void 0:e[Symbol.asyncIterator])}function Zt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function Zi(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var er=Zi();function tr(e){return H(e==null?void 0:e[er])}function rr(e){return fo(this,arguments,function(){var r,o,n,i;return Nt(this,function(a){switch(a.label){case 0:r=e.getReader(),a.label=1;case 1:a.trys.push([1,,9,10]),a.label=2;case 2:return[4,nt(r.read())];case 3:return o=a.sent(),n=o.value,i=o.done,i?[4,nt(void 0)]:[3,5];case 4:return[2,a.sent()];case 5:return[4,nt(n)];case 6:return[4,a.sent()];case 7:return a.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function or(e){return H(e==null?void 0:e.getReader)}function U(e){if(e instanceof j)return e;if(e!=null){if(Jt(e))return ea(e);if(xt(e))return ta(e);if(Gt(e))return ra(e);if(Xt(e))return Ao(e);if(tr(e))return oa(e);if(or(e))return na(e)}throw Zt(e)}function ea(e){return new j(function(t){var r=e[bt]();if(H(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function ta(e){return new j(function(t){for(var r=0;r=2;return function(o){return o.pipe(e?b(function(n,i){return e(n,i,o)}):le,Te(1),r?De(t):Qo(function(){return new ir}))}}function jr(e){return e<=0?function(){return S}:E(function(t,r){var o=[];t.subscribe(T(r,function(n){o.push(n),e=2,!0))}function pe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new g}:t,o=e.resetOnError,n=o===void 0?!0:o,i=e.resetOnComplete,a=i===void 0?!0:i,s=e.resetOnRefCountZero,p=s===void 0?!0:s;return function(c){var l,f,u,d=0,y=!1,L=!1,X=function(){f==null||f.unsubscribe(),f=void 0},te=function(){X(),l=u=void 0,y=L=!1},J=function(){var k=l;te(),k==null||k.unsubscribe()};return E(function(k,ft){d++,!L&&!y&&X();var qe=u=u!=null?u:r();ft.add(function(){d--,d===0&&!L&&!y&&(f=Ur(J,p))}),qe.subscribe(ft),!l&&d>0&&(l=new at({next:function(Fe){return qe.next(Fe)},error:function(Fe){L=!0,X(),f=Ur(te,n,Fe),qe.error(Fe)},complete:function(){y=!0,X(),f=Ur(te,a),qe.complete()}}),U(k).subscribe(l))})(c)}}function Ur(e,t){for(var r=[],o=2;oe.next(document)),e}function P(e,t=document){return Array.from(t.querySelectorAll(e))}function R(e,t=document){let r=fe(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function fe(e,t=document){return t.querySelector(e)||void 0}function Ie(){var e,t,r,o;return(o=(r=(t=(e=document.activeElement)==null?void 0:e.shadowRoot)==null?void 0:t.activeElement)!=null?r:document.activeElement)!=null?o:void 0}var wa=O(h(document.body,"focusin"),h(document.body,"focusout")).pipe(_e(1),Q(void 0),m(()=>Ie()||document.body),G(1));function et(e){return wa.pipe(m(t=>e.contains(t)),K())}function $t(e,t){return C(()=>O(h(e,"mouseenter").pipe(m(()=>!0)),h(e,"mouseleave").pipe(m(()=>!1))).pipe(t?Ht(r=>Le(+!r*t)):le,Q(e.matches(":hover"))))}function Jo(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)Jo(e,r)}function x(e,t,...r){let o=document.createElement(e);if(t)for(let n of Object.keys(t))typeof t[n]!="undefined"&&(typeof t[n]!="boolean"?o.setAttribute(n,t[n]):o.setAttribute(n,""));for(let n of r)Jo(o,n);return o}function sr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function Tt(e){let t=x("script",{src:e});return 
C(()=>(document.head.appendChild(t),O(h(t,"load"),h(t,"error").pipe(v(()=>$r(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(m(()=>{}),_(()=>document.head.removeChild(t)),Te(1))))}var Xo=new g,Ta=C(()=>typeof ResizeObserver=="undefined"?Tt("https://unpkg.com/resize-observer-polyfill"):I(void 0)).pipe(m(()=>new ResizeObserver(e=>e.forEach(t=>Xo.next(t)))),v(e=>O(Ye,I(e)).pipe(_(()=>e.disconnect()))),G(1));function ce(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ge(e){let t=e;for(;t.clientWidth===0&&t.parentElement;)t=t.parentElement;return Ta.pipe(w(r=>r.observe(t)),v(r=>Xo.pipe(b(o=>o.target===t),_(()=>r.unobserve(t)))),m(()=>ce(e)),Q(ce(e)))}function St(e){return{width:e.scrollWidth,height:e.scrollHeight}}function cr(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}function Zo(e){let t=[],r=e.parentElement;for(;r;)(e.clientWidth>r.clientWidth||e.clientHeight>r.clientHeight)&&t.push(r),r=(e=r).parentElement;return t.length===0&&t.push(document.documentElement),t}function Ve(e){return{x:e.offsetLeft,y:e.offsetTop}}function en(e){let t=e.getBoundingClientRect();return{x:t.x+window.scrollX,y:t.y+window.scrollY}}function tn(e){return O(h(window,"load"),h(window,"resize")).pipe(Me(0,me),m(()=>Ve(e)),Q(Ve(e)))}function pr(e){return{x:e.scrollLeft,y:e.scrollTop}}function Ne(e){return O(h(e,"scroll"),h(window,"scroll"),h(window,"resize")).pipe(Me(0,me),m(()=>pr(e)),Q(pr(e)))}var rn=new g,Sa=C(()=>I(new IntersectionObserver(e=>{for(let t of e)rn.next(t)},{threshold:0}))).pipe(v(e=>O(Ye,I(e)).pipe(_(()=>e.disconnect()))),G(1));function tt(e){return Sa.pipe(w(t=>t.observe(e)),v(t=>rn.pipe(b(({target:r})=>r===e),_(()=>t.unobserve(e)),m(({isIntersecting:r})=>r))))}function on(e,t=16){return Ne(e).pipe(m(({y:r})=>{let o=ce(e),n=St(e);return r>=n.height-o.height-t}),K())}var lr={drawer:R("[data-md-toggle=drawer]"),search:R("[data-md-toggle=search]")};function nn(e){return lr[e].checked}function Je(e,t){lr[e].checked!==t&&lr[e].click()}function ze(e){let t=lr[e];return h(t,"change").pipe(m(()=>t.checked),Q(t.checked))}function Oa(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function La(){return O(h(window,"compositionstart").pipe(m(()=>!0)),h(window,"compositionend").pipe(m(()=>!1))).pipe(Q(!1))}function an(){let e=h(window,"keydown").pipe(b(t=>!(t.metaKey||t.ctrlKey)),m(t=>({mode:nn("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),b(({mode:t,type:r})=>{if(t==="global"){let o=Ie();if(typeof o!="undefined")return!Oa(o,r)}return!0}),pe());return La().pipe(v(t=>t?S:e))}function ye(){return new URL(location.href)}function lt(e,t=!1){if(B("navigation.instant")&&!t){let r=x("a",{href:e.href});document.body.appendChild(r),r.click(),r.remove()}else location.href=e.href}function sn(){return new g}function cn(){return location.hash.slice(1)}function pn(e){let t=x("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function Ma(e){return O(h(window,"hashchange"),e).pipe(m(cn),Q(cn()),b(t=>t.length>0),G(1))}function ln(e){return Ma(e).pipe(m(t=>fe(`[id="${t}"]`)),b(t=>typeof t!="undefined"))}function Pt(e){let t=matchMedia(e);return ar(r=>t.addListener(()=>r(t.matches))).pipe(Q(t.matches))}function mn(){let e=matchMedia("print");return 
O(h(window,"beforeprint").pipe(m(()=>!0)),h(window,"afterprint").pipe(m(()=>!1))).pipe(Q(e.matches))}function Nr(e,t){return e.pipe(v(r=>r?t():S))}function zr(e,t){return new j(r=>{let o=new XMLHttpRequest;return o.open("GET",`${e}`),o.responseType="blob",o.addEventListener("load",()=>{o.status>=200&&o.status<300?(r.next(o.response),r.complete()):r.error(new Error(o.statusText))}),o.addEventListener("error",()=>{r.error(new Error("Network error"))}),o.addEventListener("abort",()=>{r.complete()}),typeof(t==null?void 0:t.progress$)!="undefined"&&(o.addEventListener("progress",n=>{var i;if(n.lengthComputable)t.progress$.next(n.loaded/n.total*100);else{let a=(i=o.getResponseHeader("Content-Length"))!=null?i:0;t.progress$.next(n.loaded/+a*100)}}),t.progress$.next(5)),o.send(),()=>o.abort()})}function je(e,t){return zr(e,t).pipe(v(r=>r.text()),m(r=>JSON.parse(r)),G(1))}function fn(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/html")),G(1))}function un(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/xml")),G(1))}function dn(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function hn(){return O(h(window,"scroll",{passive:!0}),h(window,"resize",{passive:!0})).pipe(m(dn),Q(dn()))}function bn(){return{width:innerWidth,height:innerHeight}}function vn(){return h(window,"resize",{passive:!0}).pipe(m(bn),Q(bn()))}function gn(){return z([hn(),vn()]).pipe(m(([e,t])=>({offset:e,size:t})),G(1))}function mr(e,{viewport$:t,header$:r}){let o=t.pipe(ee("size")),n=z([o,r]).pipe(m(()=>Ve(e)));return z([r,t,n]).pipe(m(([{height:i},{offset:a,size:s},{x:p,y:c}])=>({offset:{x:a.x-p,y:a.y-c+i},size:s})))}function _a(e){return h(e,"message",t=>t.data)}function Aa(e){let t=new g;return t.subscribe(r=>e.postMessage(r)),t}function yn(e,t=new Worker(e)){let r=_a(t),o=Aa(t),n=new g;n.subscribe(o);let i=o.pipe(Z(),ie(!0));return n.pipe(Z(),Re(r.pipe(W(i))),pe())}var Ca=R("#__config"),Ot=JSON.parse(Ca.textContent);Ot.base=`${new URL(Ot.base,ye())}`;function xe(){return Ot}function B(e){return Ot.features.includes(e)}function Ee(e,t){return typeof t!="undefined"?Ot.translations[e].replace("#",t.toString()):Ot.translations[e]}function Se(e,t=document){return R(`[data-md-component=${e}]`,t)}function ae(e,t=document){return P(`[data-md-component=${e}]`,t)}function ka(e){let t=R(".md-typeset > :first-child",e);return h(t,"click",{once:!0}).pipe(m(()=>R(".md-typeset",e)),m(r=>({hash:__md_hash(r.innerHTML)})))}function xn(e){if(!B("announce.dismiss")||!e.childElementCount)return S;if(!e.hidden){let t=R(".md-typeset",e);__md_hash(t.innerHTML)===__md_get("__announce")&&(e.hidden=!0)}return C(()=>{let t=new g;return t.subscribe(({hash:r})=>{e.hidden=!0,__md_set("__announce",r)}),ka(e).pipe(w(r=>t.next(r)),_(()=>t.complete()),m(r=>$({ref:e},r)))})}function Ha(e,{target$:t}){return t.pipe(m(r=>({hidden:r!==e})))}function En(e,t){let r=new g;return r.subscribe(({hidden:o})=>{e.hidden=o}),Ha(e,t).pipe(w(o=>r.next(o)),_(()=>r.complete()),m(o=>$({ref:e},o)))}function Rt(e,t){return t==="inline"?x("div",{class:"md-tooltip md-tooltip--inline",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"})):x("div",{class:"md-tooltip",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"}))}function wn(...e){return x("div",{class:"md-tooltip2",role:"tooltip"},x("div",{class:"md-tooltip2__inner md-typeset"},e))}function Tn(e,t){if(t=t?`${t}_annotation_${e}`:void 0,t){let r=t?`#${t}`:void 0;return 
x("aside",{class:"md-annotation",tabIndex:0},Rt(t),x("a",{href:r,class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}else return x("aside",{class:"md-annotation",tabIndex:0},Rt(t),x("span",{class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}function Sn(e){return x("button",{class:"md-clipboard md-icon",title:Ee("clipboard.copy"),"data-clipboard-target":`#${e} > code`})}var Ln=Mt(qr());function Qr(e,t){let r=t&2,o=t&1,n=Object.keys(e.terms).filter(p=>!e.terms[p]).reduce((p,c)=>[...p,x("del",null,(0,Ln.default)(c))," "],[]).slice(0,-1),i=xe(),a=new URL(e.location,i.base);B("search.highlight")&&a.searchParams.set("h",Object.entries(e.terms).filter(([,p])=>p).reduce((p,[c])=>`${p} ${c}`.trim(),""));let{tags:s}=xe();return x("a",{href:`${a}`,class:"md-search-result__link",tabIndex:-1},x("article",{class:"md-search-result__article md-typeset","data-md-score":e.score.toFixed(2)},r>0&&x("div",{class:"md-search-result__icon md-icon"}),r>0&&x("h1",null,e.title),r<=0&&x("h2",null,e.title),o>0&&e.text.length>0&&e.text,e.tags&&x("nav",{class:"md-tags"},e.tags.map(p=>{let c=s?p in s?`md-tag-icon md-tag--${s[p]}`:"md-tag-icon":"";return x("span",{class:`md-tag ${c}`},p)})),o>0&&n.length>0&&x("p",{class:"md-search-result__terms"},Ee("search.result.term.missing"),": ",...n)))}function Mn(e){let t=e[0].score,r=[...e],o=xe(),n=r.findIndex(l=>!`${new URL(l.location,o.base)}`.includes("#")),[i]=r.splice(n,1),a=r.findIndex(l=>l.scoreQr(l,1)),...p.length?[x("details",{class:"md-search-result__more"},x("summary",{tabIndex:-1},x("div",null,p.length>0&&p.length===1?Ee("search.result.more.one"):Ee("search.result.more.other",p.length))),...p.map(l=>Qr(l,1)))]:[]];return x("li",{class:"md-search-result__item"},c)}function _n(e){return x("ul",{class:"md-source__facts"},Object.entries(e).map(([t,r])=>x("li",{class:`md-source__fact md-source__fact--${t}`},typeof r=="number"?sr(r):r)))}function Kr(e){let t=`tabbed-control tabbed-control--${e}`;return x("div",{class:t,hidden:!0},x("button",{class:"tabbed-button",tabIndex:-1,"aria-hidden":"true"}))}function An(e){return x("div",{class:"md-typeset__scrollwrap"},x("div",{class:"md-typeset__table"},e))}function Ra(e){var o;let t=xe(),r=new URL(`../${e.version}/`,t.base);return x("li",{class:"md-version__item"},x("a",{href:`${r}`,class:"md-version__link"},e.title,((o=t.version)==null?void 0:o.alias)&&e.aliases.length>0&&x("span",{class:"md-version__alias"},e.aliases[0])))}function Cn(e,t){var o;let r=xe();return e=e.filter(n=>{var i;return!((i=n.properties)!=null&&i.hidden)}),x("div",{class:"md-version"},x("button",{class:"md-version__current","aria-label":Ee("select.version")},t.title,((o=r.version)==null?void 0:o.alias)&&t.aliases.length>0&&x("span",{class:"md-version__alias"},t.aliases[0])),x("ul",{class:"md-version__list"},e.map(Ra)))}var Ia=0;function ja(e){let t=z([et(e),$t(e)]).pipe(m(([o,n])=>o||n),K()),r=C(()=>Zo(e)).pipe(ne(Ne),pt(1),He(t),m(()=>en(e)));return t.pipe(Ae(o=>o),v(()=>z([t,r])),m(([o,n])=>({active:o,offset:n})),pe())}function Fa(e,t){let{content$:r,viewport$:o}=t,n=`__tooltip2_${Ia++}`;return C(()=>{let i=new g,a=new _r(!1);i.pipe(Z(),ie(!1)).subscribe(a);let s=a.pipe(Ht(c=>Le(+!c*250,kr)),K(),v(c=>c?r:S),w(c=>c.id=n),pe());z([i.pipe(m(({active:c})=>c)),s.pipe(v(c=>$t(c,250)),Q(!1))]).pipe(m(c=>c.some(l=>l))).subscribe(a);let p=a.pipe(b(c=>c),re(s,o),m(([c,l,{size:f}])=>{let 
u=e.getBoundingClientRect(),d=u.width/2;if(l.role==="tooltip")return{x:d,y:8+u.height};if(u.y>=f.height/2){let{height:y}=ce(l);return{x:d,y:-16-y}}else return{x:d,y:16+u.height}}));return z([s,i,p]).subscribe(([c,{offset:l},f])=>{c.style.setProperty("--md-tooltip-host-x",`${l.x}px`),c.style.setProperty("--md-tooltip-host-y",`${l.y}px`),c.style.setProperty("--md-tooltip-x",`${f.x}px`),c.style.setProperty("--md-tooltip-y",`${f.y}px`),c.classList.toggle("md-tooltip2--top",f.y<0),c.classList.toggle("md-tooltip2--bottom",f.y>=0)}),a.pipe(b(c=>c),re(s,(c,l)=>l),b(c=>c.role==="tooltip")).subscribe(c=>{let l=ce(R(":scope > *",c));c.style.setProperty("--md-tooltip-width",`${l.width}px`),c.style.setProperty("--md-tooltip-tail","0px")}),a.pipe(K(),ve(me),re(s)).subscribe(([c,l])=>{l.classList.toggle("md-tooltip2--active",c)}),z([a.pipe(b(c=>c)),s]).subscribe(([c,l])=>{l.role==="dialog"?(e.setAttribute("aria-controls",n),e.setAttribute("aria-haspopup","dialog")):e.setAttribute("aria-describedby",n)}),a.pipe(b(c=>!c)).subscribe(()=>{e.removeAttribute("aria-controls"),e.removeAttribute("aria-describedby"),e.removeAttribute("aria-haspopup")}),ja(e).pipe(w(c=>i.next(c)),_(()=>i.complete()),m(c=>$({ref:e},c)))})}function mt(e,{viewport$:t},r=document.body){return Fa(e,{content$:new j(o=>{let n=e.title,i=wn(n);return o.next(i),e.removeAttribute("title"),r.append(i),()=>{i.remove(),e.setAttribute("title",n)}}),viewport$:t})}function Ua(e,t){let r=C(()=>z([tn(e),Ne(t)])).pipe(m(([{x:o,y:n},i])=>{let{width:a,height:s}=ce(e);return{x:o-i.x+a/2,y:n-i.y+s/2}}));return et(e).pipe(v(o=>r.pipe(m(n=>({active:o,offset:n})),Te(+!o||1/0))))}function kn(e,t,{target$:r}){let[o,n]=Array.from(e.children);return C(()=>{let i=new g,a=i.pipe(Z(),ie(!0));return i.subscribe({next({offset:s}){e.style.setProperty("--md-tooltip-x",`${s.x}px`),e.style.setProperty("--md-tooltip-y",`${s.y}px`)},complete(){e.style.removeProperty("--md-tooltip-x"),e.style.removeProperty("--md-tooltip-y")}}),tt(e).pipe(W(a)).subscribe(s=>{e.toggleAttribute("data-md-visible",s)}),O(i.pipe(b(({active:s})=>s)),i.pipe(_e(250),b(({active:s})=>!s))).subscribe({next({active:s}){s?e.prepend(o):o.remove()},complete(){e.prepend(o)}}),i.pipe(Me(16,me)).subscribe(({active:s})=>{o.classList.toggle("md-tooltip--active",s)}),i.pipe(pt(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:s})=>s)).subscribe({next(s){s?e.style.setProperty("--md-tooltip-0",`${-s}px`):e.style.removeProperty("--md-tooltip-0")},complete(){e.style.removeProperty("--md-tooltip-0")}}),h(n,"click").pipe(W(a),b(s=>!(s.metaKey||s.ctrlKey))).subscribe(s=>{s.stopPropagation(),s.preventDefault()}),h(n,"mousedown").pipe(W(a),re(i)).subscribe(([s,{active:p}])=>{var c;if(s.button!==0||s.metaKey||s.ctrlKey)s.preventDefault();else if(p){s.preventDefault();let l=e.parentElement.closest(".md-annotation");l instanceof HTMLElement?l.focus():(c=Ie())==null||c.blur()}}),r.pipe(W(a),b(s=>s===o),Ge(125)).subscribe(()=>e.focus()),Ua(e,t).pipe(w(s=>i.next(s)),_(()=>i.complete()),m(s=>$({ref:e},s)))})}function Wa(e){return e.tagName==="CODE"?P(".c, .c1, .cm",e):[e]}function Da(e){let t=[];for(let r of Wa(e)){let o=[],n=document.createNodeIterator(r,NodeFilter.SHOW_TEXT);for(let i=n.nextNode();i;i=n.nextNode())o.push(i);for(let i of o){let a;for(;a=/(\(\d+\))(!)?/.exec(i.textContent);){let[,s,p]=a;if(typeof p=="undefined"){let c=i.splitText(a.index);i=c.splitText(s.length),t.push(c)}else{i.textContent=s,t.push(i);break}}}}return t}function 
Hn(e,t){t.append(...Array.from(e.childNodes))}function fr(e,t,{target$:r,print$:o}){let n=t.closest("[id]"),i=n==null?void 0:n.id,a=new Map;for(let s of Da(t)){let[,p]=s.textContent.match(/\((\d+)\)/);fe(`:scope > li:nth-child(${p})`,e)&&(a.set(p,Tn(p,i)),s.replaceWith(a.get(p)))}return a.size===0?S:C(()=>{let s=new g,p=s.pipe(Z(),ie(!0)),c=[];for(let[l,f]of a)c.push([R(".md-typeset",f),R(`:scope > li:nth-child(${l})`,e)]);return o.pipe(W(p)).subscribe(l=>{e.hidden=!l,e.classList.toggle("md-annotation-list",l);for(let[f,u]of c)l?Hn(f,u):Hn(u,f)}),O(...[...a].map(([,l])=>kn(l,t,{target$:r}))).pipe(_(()=>s.complete()),pe())})}function $n(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return $n(t)}}function Pn(e,t){return C(()=>{let r=$n(e);return typeof r!="undefined"?fr(r,e,t):S})}var Rn=Mt(Br());var Va=0;function In(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return In(t)}}function Na(e){return ge(e).pipe(m(({width:t})=>({scrollable:St(e).width>t})),ee("scrollable"))}function jn(e,t){let{matches:r}=matchMedia("(hover)"),o=C(()=>{let n=new g,i=n.pipe(jr(1));n.subscribe(({scrollable:c})=>{c&&r?e.setAttribute("tabindex","0"):e.removeAttribute("tabindex")});let a=[];if(Rn.default.isSupported()&&(e.closest(".copy")||B("content.code.copy")&&!e.closest(".no-copy"))){let c=e.closest("pre");c.id=`__code_${Va++}`;let l=Sn(c.id);c.insertBefore(l,e),B("content.tooltips")&&a.push(mt(l,{viewport$}))}let s=e.closest(".highlight");if(s instanceof HTMLElement){let c=In(s);if(typeof c!="undefined"&&(s.classList.contains("annotate")||B("content.code.annotate"))){let l=fr(c,e,t);a.push(ge(s).pipe(W(i),m(({width:f,height:u})=>f&&u),K(),v(f=>f?l:S)))}}return P(":scope > span[id]",e).length&&e.classList.add("md-code__content"),Na(e).pipe(w(c=>n.next(c)),_(()=>n.complete()),m(c=>$({ref:e},c)),Re(...a))});return B("content.lazy")?tt(e).pipe(b(n=>n),Te(1),v(()=>o)):o}function za(e,{target$:t,print$:r}){let o=!0;return O(t.pipe(m(n=>n.closest("details:not([open])")),b(n=>e===n),m(()=>({action:"open",reveal:!0}))),r.pipe(b(n=>n||!o),w(()=>o=e.open),m(n=>({action:n?"open":"close"}))))}function Fn(e,t){return C(()=>{let r=new g;return r.subscribe(({action:o,reveal:n})=>{e.toggleAttribute("open",o==="open"),n&&e.scrollIntoView()}),za(e,t).pipe(w(o=>r.next(o)),_(()=>r.complete()),m(o=>$({ref:e},o)))})}var Un=".node circle,.node ellipse,.node path,.node polygon,.node rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}marker{fill:var(--md-mermaid-edge-color)!important}.edgeLabel .label rect{fill:#0000}.label{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.label foreignObject{line-height:normal;overflow:visible}.label div .edgeLabel{color:var(--md-mermaid-label-fg-color)}.edgeLabel,.edgeLabel p,.label div .edgeLabel{background-color:var(--md-mermaid-label-bg-color)}.edgeLabel,.edgeLabel p{fill:var(--md-mermaid-label-bg-color);color:var(--md-mermaid-edge-color)}.edgePath .path,.flowchart-link{stroke:var(--md-mermaid-edge-color);stroke-width:.05rem}.edgePath .arrowheadPath{fill:var(--md-mermaid-edge-color);stroke:none}.cluster rect{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}.cluster span{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}g #flowchart-circleEnd,g #flowchart-circleStart,g #flowchart-crossEnd,g #flowchart-crossStart,g 
#flowchart-pointEnd,g #flowchart-pointStart{stroke:none}g.classGroup line,g.classGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.classGroup text{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.classLabel .box{fill:var(--md-mermaid-label-bg-color);background-color:var(--md-mermaid-label-bg-color);opacity:1}.classLabel .label{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.node .divider{stroke:var(--md-mermaid-node-fg-color)}.relation{stroke:var(--md-mermaid-edge-color)}.cardinality{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.cardinality text{fill:inherit!important}defs #classDiagram-compositionEnd,defs #classDiagram-compositionStart,defs #classDiagram-dependencyEnd,defs #classDiagram-dependencyStart,defs #classDiagram-extensionEnd,defs #classDiagram-extensionStart{fill:var(--md-mermaid-edge-color)!important;stroke:var(--md-mermaid-edge-color)!important}defs #classDiagram-aggregationEnd,defs #classDiagram-aggregationStart{fill:var(--md-mermaid-label-bg-color)!important;stroke:var(--md-mermaid-edge-color)!important}g.stateGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.stateGroup .state-title{fill:var(--md-mermaid-label-fg-color)!important;font-family:var(--md-mermaid-font-family)}g.stateGroup .composit{fill:var(--md-mermaid-label-bg-color)}.nodeLabel,.nodeLabel p{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}a .nodeLabel{text-decoration:underline}.node circle.state-end,.node circle.state-start,.start-state{fill:var(--md-mermaid-edge-color);stroke:none}.end-state-inner,.end-state-outer{fill:var(--md-mermaid-edge-color)}.end-state-inner,.node circle.state-end{stroke:var(--md-mermaid-label-bg-color)}.transition{stroke:var(--md-mermaid-edge-color)}[id^=state-fork] rect,[id^=state-join] rect{fill:var(--md-mermaid-edge-color)!important;stroke:none!important}.statediagram-cluster.statediagram-cluster .inner{fill:var(--md-default-bg-color)}.statediagram-cluster rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.statediagram-state rect.divider{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}defs #statediagram-barbEnd{stroke:var(--md-mermaid-edge-color)}.attributeBoxEven,.attributeBoxOdd{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityBox{fill:var(--md-mermaid-label-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityLabel{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.relationshipLabelBox{fill:var(--md-mermaid-label-bg-color);fill-opacity:1;background-color:var(--md-mermaid-label-bg-color);opacity:1}.relationshipLabel{fill:var(--md-mermaid-label-fg-color)}.relationshipLine{stroke:var(--md-mermaid-edge-color)}defs #ONE_OR_MORE_END *,defs #ONE_OR_MORE_START *,defs #ONLY_ONE_END *,defs #ONLY_ONE_START *,defs #ZERO_OR_MORE_END *,defs #ZERO_OR_MORE_START *,defs #ZERO_OR_ONE_END *,defs #ZERO_OR_ONE_START *{stroke:var(--md-mermaid-edge-color)!important}defs #ZERO_OR_MORE_END circle,defs #ZERO_OR_MORE_START circle{fill:var(--md-mermaid-label-bg-color)}.actor{fill:var(--md-mermaid-sequence-actor-bg-color);stroke:var(--md-mermaid-sequence-actor-border-color)}text.actor>tspan{fill:var(--md-mermaid-sequence-actor-fg-color);font-family:var(--md-mermaid-font-family)}line{stroke:var(--md-mermaid-sequence-actor-line-color)}.actor-man circle,.actor-man 
line{fill:var(--md-mermaid-sequence-actorman-bg-color);stroke:var(--md-mermaid-sequence-actorman-line-color)}.messageLine0,.messageLine1{stroke:var(--md-mermaid-sequence-message-line-color)}.note{fill:var(--md-mermaid-sequence-note-bg-color);stroke:var(--md-mermaid-sequence-note-border-color)}.loopText,.loopText>tspan,.messageText,.noteText>tspan{stroke:none;font-family:var(--md-mermaid-font-family)!important}.messageText{fill:var(--md-mermaid-sequence-message-fg-color)}.loopText,.loopText>tspan{fill:var(--md-mermaid-sequence-loop-fg-color)}.noteText>tspan{fill:var(--md-mermaid-sequence-note-fg-color)}#arrowhead path{fill:var(--md-mermaid-sequence-message-line-color);stroke:none}.loopLine{fill:var(--md-mermaid-sequence-loop-bg-color);stroke:var(--md-mermaid-sequence-loop-border-color)}.labelBox{fill:var(--md-mermaid-sequence-label-bg-color);stroke:none}.labelText,.labelText>span{fill:var(--md-mermaid-sequence-label-fg-color);font-family:var(--md-mermaid-font-family)}.sequenceNumber{fill:var(--md-mermaid-sequence-number-fg-color)}rect.rect{fill:var(--md-mermaid-sequence-box-bg-color);stroke:none}rect.rect+text.text{fill:var(--md-mermaid-sequence-box-fg-color)}defs #sequencenumber{fill:var(--md-mermaid-sequence-number-bg-color)!important}";var Gr,Qa=0;function Ka(){return typeof mermaid=="undefined"||mermaid instanceof Element?Tt("https://unpkg.com/mermaid@11/dist/mermaid.min.js"):I(void 0)}function Wn(e){return e.classList.remove("mermaid"),Gr||(Gr=Ka().pipe(w(()=>mermaid.initialize({startOnLoad:!1,themeCSS:Un,sequence:{actorFontSize:"16px",messageFontSize:"16px",noteFontSize:"16px"}})),m(()=>{}),G(1))),Gr.subscribe(()=>co(this,null,function*(){e.classList.add("mermaid");let t=`__mermaid_${Qa++}`,r=x("div",{class:"mermaid"}),o=e.textContent,{svg:n,fn:i}=yield mermaid.render(t,o),a=r.attachShadow({mode:"closed"});a.innerHTML=n,e.replaceWith(r),i==null||i(a)})),Gr.pipe(m(()=>({ref:e})))}var Dn=x("table");function Vn(e){return e.replaceWith(Dn),Dn.replaceWith(An(e)),I({ref:e})}function Ya(e){let t=e.find(r=>r.checked)||e[0];return O(...e.map(r=>h(r,"change").pipe(m(()=>R(`label[for="${r.id}"]`))))).pipe(Q(R(`label[for="${t.id}"]`)),m(r=>({active:r})))}function Nn(e,{viewport$:t,target$:r}){let o=R(".tabbed-labels",e),n=P(":scope > input",e),i=Kr("prev");e.append(i);let a=Kr("next");return e.append(a),C(()=>{let s=new g,p=s.pipe(Z(),ie(!0));z([s,ge(e),tt(e)]).pipe(W(p),Me(1,me)).subscribe({next([{active:c},l]){let f=Ve(c),{width:u}=ce(c);e.style.setProperty("--md-indicator-x",`${f.x}px`),e.style.setProperty("--md-indicator-width",`${u}px`);let d=pr(o);(f.xd.x+l.width)&&o.scrollTo({left:Math.max(0,f.x-16),behavior:"smooth"})},complete(){e.style.removeProperty("--md-indicator-x"),e.style.removeProperty("--md-indicator-width")}}),z([Ne(o),ge(o)]).pipe(W(p)).subscribe(([c,l])=>{let f=St(o);i.hidden=c.x<16,a.hidden=c.x>f.width-l.width-16}),O(h(i,"click").pipe(m(()=>-1)),h(a,"click").pipe(m(()=>1))).pipe(W(p)).subscribe(c=>{let{width:l}=ce(o);o.scrollBy({left:l*c,behavior:"smooth"})}),r.pipe(W(p),b(c=>n.includes(c))).subscribe(c=>c.click()),o.classList.add("tabbed-labels--linked");for(let c of n){let l=R(`label[for="${c.id}"]`);l.replaceChildren(x("a",{href:`#${l.htmlFor}`,tabIndex:-1},...Array.from(l.childNodes))),h(l.firstElementChild,"click").pipe(W(p),b(f=>!(f.metaKey||f.ctrlKey)),w(f=>{f.preventDefault(),f.stopPropagation()})).subscribe(()=>{history.replaceState({},"",`#${l.htmlFor}`),l.click()})}return B("content.tabs.link")&&s.pipe(Ce(1),re(t)).subscribe(([{active:c},{offset:l}])=>{let 
f=c.innerText.trim();if(c.hasAttribute("data-md-switching"))c.removeAttribute("data-md-switching");else{let u=e.offsetTop-l.y;for(let y of P("[data-tabs]"))for(let L of P(":scope > input",y)){let X=R(`label[for="${L.id}"]`);if(X!==c&&X.innerText.trim()===f){X.setAttribute("data-md-switching",""),L.click();break}}window.scrollTo({top:e.offsetTop-u});let d=__md_get("__tabs")||[];__md_set("__tabs",[...new Set([f,...d])])}}),s.pipe(W(p)).subscribe(()=>{for(let c of P("audio, video",e))c.pause()}),Ya(n).pipe(w(c=>s.next(c)),_(()=>s.complete()),m(c=>$({ref:e},c)))}).pipe(Ke(se))}function zn(e,{viewport$:t,target$:r,print$:o}){return O(...P(".annotate:not(.highlight)",e).map(n=>Pn(n,{target$:r,print$:o})),...P("pre:not(.mermaid) > code",e).map(n=>jn(n,{target$:r,print$:o})),...P("pre.mermaid",e).map(n=>Wn(n)),...P("table:not([class])",e).map(n=>Vn(n)),...P("details",e).map(n=>Fn(n,{target$:r,print$:o})),...P("[data-tabs]",e).map(n=>Nn(n,{viewport$:t,target$:r})),...P("[title]",e).filter(()=>B("content.tooltips")).map(n=>mt(n,{viewport$:t})))}function Ba(e,{alert$:t}){return t.pipe(v(r=>O(I(!0),I(!1).pipe(Ge(2e3))).pipe(m(o=>({message:r,active:o})))))}function qn(e,t){let r=R(".md-typeset",e);return C(()=>{let o=new g;return o.subscribe(({message:n,active:i})=>{e.classList.toggle("md-dialog--active",i),r.textContent=n}),Ba(e,t).pipe(w(n=>o.next(n)),_(()=>o.complete()),m(n=>$({ref:e},n)))})}var Ga=0;function Ja(e,t){document.body.append(e);let{width:r}=ce(e);e.style.setProperty("--md-tooltip-width",`${r}px`),e.remove();let o=cr(t),n=typeof o!="undefined"?Ne(o):I({x:0,y:0}),i=O(et(t),$t(t)).pipe(K());return z([i,n]).pipe(m(([a,s])=>{let{x:p,y:c}=Ve(t),l=ce(t),f=t.closest("table");return f&&t.parentElement&&(p+=f.offsetLeft+t.parentElement.offsetLeft,c+=f.offsetTop+t.parentElement.offsetTop),{active:a,offset:{x:p-s.x+l.width/2-r/2,y:c-s.y+l.height+8}}}))}function Qn(e){let t=e.title;if(!t.length)return S;let r=`__tooltip_${Ga++}`,o=Rt(r,"inline"),n=R(".md-typeset",o);return n.innerHTML=t,C(()=>{let i=new g;return i.subscribe({next({offset:a}){o.style.setProperty("--md-tooltip-x",`${a.x}px`),o.style.setProperty("--md-tooltip-y",`${a.y}px`)},complete(){o.style.removeProperty("--md-tooltip-x"),o.style.removeProperty("--md-tooltip-y")}}),O(i.pipe(b(({active:a})=>a)),i.pipe(_e(250),b(({active:a})=>!a))).subscribe({next({active:a}){a?(e.insertAdjacentElement("afterend",o),e.setAttribute("aria-describedby",r),e.removeAttribute("title")):(o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t))},complete(){o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t)}}),i.pipe(Me(16,me)).subscribe(({active:a})=>{o.classList.toggle("md-tooltip--active",a)}),i.pipe(pt(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:a})=>a)).subscribe({next(a){a?o.style.setProperty("--md-tooltip-0",`${-a}px`):o.style.removeProperty("--md-tooltip-0")},complete(){o.style.removeProperty("--md-tooltip-0")}}),Ja(o,e).pipe(w(a=>i.next(a)),_(()=>i.complete()),m(a=>$({ref:e},a)))}).pipe(Ke(se))}function Xa({viewport$:e}){if(!B("header.autohide"))return I(!1);let t=e.pipe(m(({offset:{y:n}})=>n),Be(2,1),m(([n,i])=>[nMath.abs(i-n.y)>100),m(([,[n]])=>n),K()),o=ze("search");return z([e,o]).pipe(m(([{offset:n},i])=>n.y>400&&!i),K(),v(n=>n?r:I(!1)),Q(!1))}function Kn(e,t){return C(()=>z([ge(e),Xa(t)])).pipe(m(([{height:r},o])=>({height:r,hidden:o})),K((r,o)=>r.height===o.height&&r.hidden===o.hidden),G(1))}function Yn(e,{header$:t,main$:r}){return C(()=>{let o=new 
g,n=o.pipe(Z(),ie(!0));o.pipe(ee("active"),He(t)).subscribe(([{active:a},{hidden:s}])=>{e.classList.toggle("md-header--shadow",a&&!s),e.hidden=s});let i=ue(P("[title]",e)).pipe(b(()=>B("content.tooltips")),ne(a=>Qn(a)));return r.subscribe(o),t.pipe(W(n),m(a=>$({ref:e},a)),Re(i.pipe(W(n))))})}function Za(e,{viewport$:t,header$:r}){return mr(e,{viewport$:t,header$:r}).pipe(m(({offset:{y:o}})=>{let{height:n}=ce(e);return{active:o>=n}}),ee("active"))}function Bn(e,t){return C(()=>{let r=new g;r.subscribe({next({active:n}){e.classList.toggle("md-header__title--active",n)},complete(){e.classList.remove("md-header__title--active")}});let o=fe(".md-content h1");return typeof o=="undefined"?S:Za(o,t).pipe(w(n=>r.next(n)),_(()=>r.complete()),m(n=>$({ref:e},n)))})}function Gn(e,{viewport$:t,header$:r}){let o=r.pipe(m(({height:i})=>i),K()),n=o.pipe(v(()=>ge(e).pipe(m(({height:i})=>({top:e.offsetTop,bottom:e.offsetTop+i})),ee("bottom"))));return z([o,n,t]).pipe(m(([i,{top:a,bottom:s},{offset:{y:p},size:{height:c}}])=>(c=Math.max(0,c-Math.max(0,a-p,i)-Math.max(0,c+p-s)),{offset:a-i,height:c,active:a-i<=p})),K((i,a)=>i.offset===a.offset&&i.height===a.height&&i.active===a.active))}function es(e){let t=__md_get("__palette")||{index:e.findIndex(o=>matchMedia(o.getAttribute("data-md-color-media")).matches)},r=Math.max(0,Math.min(t.index,e.length-1));return I(...e).pipe(ne(o=>h(o,"change").pipe(m(()=>o))),Q(e[r]),m(o=>({index:e.indexOf(o),color:{media:o.getAttribute("data-md-color-media"),scheme:o.getAttribute("data-md-color-scheme"),primary:o.getAttribute("data-md-color-primary"),accent:o.getAttribute("data-md-color-accent")}})),G(1))}function Jn(e){let t=P("input",e),r=x("meta",{name:"theme-color"});document.head.appendChild(r);let o=x("meta",{name:"color-scheme"});document.head.appendChild(o);let n=Pt("(prefers-color-scheme: light)");return C(()=>{let i=new g;return i.subscribe(a=>{if(document.body.setAttribute("data-md-color-switching",""),a.color.media==="(prefers-color-scheme)"){let s=matchMedia("(prefers-color-scheme: light)"),p=document.querySelector(s.matches?"[data-md-color-media='(prefers-color-scheme: light)']":"[data-md-color-media='(prefers-color-scheme: dark)']");a.color.scheme=p.getAttribute("data-md-color-scheme"),a.color.primary=p.getAttribute("data-md-color-primary"),a.color.accent=p.getAttribute("data-md-color-accent")}for(let[s,p]of Object.entries(a.color))document.body.setAttribute(`data-md-color-${s}`,p);for(let s=0;sa.key==="Enter"),re(i,(a,s)=>s)).subscribe(({index:a})=>{a=(a+1)%t.length,t[a].click(),t[a].focus()}),i.pipe(m(()=>{let a=Se("header"),s=window.getComputedStyle(a);return o.content=s.colorScheme,s.backgroundColor.match(/\d+/g).map(p=>(+p).toString(16).padStart(2,"0")).join("")})).subscribe(a=>r.content=`#${a}`),i.pipe(ve(se)).subscribe(()=>{document.body.removeAttribute("data-md-color-switching")}),es(t).pipe(W(n.pipe(Ce(1))),ct(),w(a=>i.next(a)),_(()=>i.complete()),m(a=>$({ref:e},a)))})}function Xn(e,{progress$:t}){return C(()=>{let r=new g;return r.subscribe(({value:o})=>{e.style.setProperty("--md-progress-value",`${o}`)}),t.pipe(w(o=>r.next({value:o})),_(()=>r.complete()),m(o=>({ref:e,value:o})))})}var Jr=Mt(Br());function ts(e){e.setAttribute("data-md-copying","");let t=e.closest("[data-copy]"),r=t?t.getAttribute("data-copy"):e.innerText;return e.removeAttribute("data-md-copying"),r.trimEnd()}function Zn({alert$:e}){Jr.default.isSupported()&&new j(t=>{new Jr.default("[data-clipboard-target], 
[data-clipboard-text]",{text:r=>r.getAttribute("data-clipboard-text")||ts(R(r.getAttribute("data-clipboard-target")))}).on("success",r=>t.next(r))}).pipe(w(t=>{t.trigger.focus()}),m(()=>Ee("clipboard.copied"))).subscribe(e)}function ei(e,t){return e.protocol=t.protocol,e.hostname=t.hostname,e}function rs(e,t){let r=new Map;for(let o of P("url",e)){let n=R("loc",o),i=[ei(new URL(n.textContent),t)];r.set(`${i[0]}`,i);for(let a of P("[rel=alternate]",o)){let s=a.getAttribute("href");s!=null&&i.push(ei(new URL(s),t))}}return r}function ur(e){return un(new URL("sitemap.xml",e)).pipe(m(t=>rs(t,new URL(e))),de(()=>I(new Map)))}function os(e,t){if(!(e.target instanceof Element))return S;let r=e.target.closest("a");if(r===null)return S;if(r.target||e.metaKey||e.ctrlKey)return S;let o=new URL(r.href);return o.search=o.hash="",t.has(`${o}`)?(e.preventDefault(),I(new URL(r.href))):S}function ti(e){let t=new Map;for(let r of P(":scope > *",e.head))t.set(r.outerHTML,r);return t}function ri(e){for(let t of P("[href], [src]",e))for(let r of["href","src"]){let o=t.getAttribute(r);if(o&&!/^(?:[a-z]+:)?\/\//i.test(o)){t[r]=t[r];break}}return I(e)}function ns(e){for(let o of["[data-md-component=announce]","[data-md-component=container]","[data-md-component=header-topic]","[data-md-component=outdated]","[data-md-component=logo]","[data-md-component=skip]",...B("navigation.tabs.sticky")?["[data-md-component=tabs]"]:[]]){let n=fe(o),i=fe(o,e);typeof n!="undefined"&&typeof i!="undefined"&&n.replaceWith(i)}let t=ti(document);for(let[o,n]of ti(e))t.has(o)?t.delete(o):document.head.appendChild(n);for(let o of t.values()){let n=o.getAttribute("name");n!=="theme-color"&&n!=="color-scheme"&&o.remove()}let r=Se("container");return We(P("script",r)).pipe(v(o=>{let n=e.createElement("script");if(o.src){for(let i of o.getAttributeNames())n.setAttribute(i,o.getAttribute(i));return o.replaceWith(n),new j(i=>{n.onload=()=>i.complete()})}else return n.textContent=o.textContent,o.replaceWith(n),S}),Z(),ie(document))}function oi({location$:e,viewport$:t,progress$:r}){let o=xe();if(location.protocol==="file:")return S;let n=ur(o.base);I(document).subscribe(ri);let i=h(document.body,"click").pipe(He(n),v(([p,c])=>os(p,c)),pe()),a=h(window,"popstate").pipe(m(ye),pe());i.pipe(re(t)).subscribe(([p,{offset:c}])=>{history.replaceState(c,""),history.pushState(null,"",p)}),O(i,a).subscribe(e);let s=e.pipe(ee("pathname"),v(p=>fn(p,{progress$:r}).pipe(de(()=>(lt(p,!0),S)))),v(ri),v(ns),pe());return O(s.pipe(re(e,(p,c)=>c)),s.pipe(v(()=>e),ee("pathname"),v(()=>e),ee("hash")),e.pipe(K((p,c)=>p.pathname===c.pathname&&p.hash===c.hash),v(()=>i),w(()=>history.back()))).subscribe(p=>{var c,l;history.state!==null||!p.hash?window.scrollTo(0,(l=(c=history.state)==null?void 0:c.y)!=null?l:0):(history.scrollRestoration="auto",pn(p.hash),history.scrollRestoration="manual")}),e.subscribe(()=>{history.scrollRestoration="manual"}),h(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}),t.pipe(ee("offset"),_e(100)).subscribe(({offset:p})=>{history.replaceState(p,"")}),s}var ni=Mt(qr());function ii(e){let t=e.separator.split("|").map(n=>n.replace(/(\(\?[!=<][^)]+\))/g,"").length===0?"\uFFFD":n).join("|"),r=new RegExp(t,"img"),o=(n,i,a)=>`${i}${a}`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();let i=new RegExp(`(^|${e.separator}|)(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return a=>(0,ni.default)(a).replace(i,o).replace(/<\/mark>(\s+)]*>/img,"$1")}}function jt(e){return e.type===1}function dr(e){return 
e.type===3}function ai(e,t){let r=yn(e);return O(I(location.protocol!=="file:"),ze("search")).pipe(Ae(o=>o),v(()=>t)).subscribe(({config:o,docs:n})=>r.next({type:0,data:{config:o,docs:n,options:{suggest:B("search.suggest")}}})),r}function si(e){var l;let{selectedVersionSitemap:t,selectedVersionBaseURL:r,currentLocation:o,currentBaseURL:n}=e,i=(l=Xr(n))==null?void 0:l.pathname;if(i===void 0)return;let a=ss(o.pathname,i);if(a===void 0)return;let s=ps(t.keys());if(!t.has(s))return;let p=Xr(a,s);if(!p||!t.has(p.href))return;let c=Xr(a,r);if(c)return c.hash=o.hash,c.search=o.search,c}function Xr(e,t){try{return new URL(e,t)}catch(r){return}}function ss(e,t){if(e.startsWith(t))return e.slice(t.length)}function cs(e,t){let r=Math.min(e.length,t.length),o;for(o=0;oS)),o=r.pipe(m(n=>{let[,i]=t.base.match(/([^/]+)\/?$/);return n.find(({version:a,aliases:s})=>a===i||s.includes(i))||n[0]}));r.pipe(m(n=>new Map(n.map(i=>[`${new URL(`../${i.version}/`,t.base)}`,i]))),v(n=>h(document.body,"click").pipe(b(i=>!i.metaKey&&!i.ctrlKey),re(o),v(([i,a])=>{if(i.target instanceof Element){let s=i.target.closest("a");if(s&&!s.target&&n.has(s.href)){let p=s.href;return!i.target.closest(".md-version")&&n.get(p)===a?S:(i.preventDefault(),I(new URL(p)))}}return S}),v(i=>ur(i).pipe(m(a=>{var s;return(s=si({selectedVersionSitemap:a,selectedVersionBaseURL:i,currentLocation:ye(),currentBaseURL:t.base}))!=null?s:i})))))).subscribe(n=>lt(n,!0)),z([r,o]).subscribe(([n,i])=>{R(".md-header__topic").appendChild(Cn(n,i))}),e.pipe(v(()=>o)).subscribe(n=>{var a;let i=__md_get("__outdated",sessionStorage);if(i===null){i=!0;let s=((a=t.version)==null?void 0:a.default)||"latest";Array.isArray(s)||(s=[s]);e:for(let p of s)for(let c of n.aliases.concat(n.version))if(new RegExp(p,"i").test(c)){i=!1;break e}__md_set("__outdated",i,sessionStorage)}if(i)for(let s of ae("outdated"))s.hidden=!1})}function ls(e,{worker$:t}){let{searchParams:r}=ye();r.has("q")&&(Je("search",!0),e.value=r.get("q"),e.focus(),ze("search").pipe(Ae(i=>!i)).subscribe(()=>{let i=ye();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=et(e),n=O(t.pipe(Ae(jt)),h(e,"keyup"),o).pipe(m(()=>e.value),K());return z([n,o]).pipe(m(([i,a])=>({value:i,focus:a})),G(1))}function pi(e,{worker$:t}){let r=new g,o=r.pipe(Z(),ie(!0));z([t.pipe(Ae(jt)),r],(i,a)=>a).pipe(ee("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(ee("focus")).subscribe(({focus:i})=>{i&&Je("search",i)}),h(e.form,"reset").pipe(W(o)).subscribe(()=>e.focus());let n=R("header [for=__search]");return h(n,"click").subscribe(()=>e.focus()),ls(e,{worker$:t}).pipe(w(i=>r.next(i)),_(()=>r.complete()),m(i=>$({ref:e},i)),G(1))}function li(e,{worker$:t,query$:r}){let o=new g,n=on(e.parentElement).pipe(b(Boolean)),i=e.parentElement,a=R(":scope > :first-child",e),s=R(":scope > :last-child",e);ze("search").subscribe(l=>s.setAttribute("role",l?"list":"presentation")),o.pipe(re(r),Wr(t.pipe(Ae(jt)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 0:a.textContent=f.length?Ee("search.result.none"):Ee("search.result.placeholder");break;case 1:a.textContent=Ee("search.result.one");break;default:let u=sr(l.length);a.textContent=Ee("search.result.other",u)}});let p=o.pipe(w(()=>s.innerHTML=""),v(({items:l})=>O(I(...l.slice(0,10)),I(...l.slice(10)).pipe(Be(4),Vr(n),v(([f])=>f)))),m(Mn),pe());return p.subscribe(l=>s.appendChild(l)),p.pipe(ne(l=>{let f=fe("details",l);return typeof 
f=="undefined"?S:h(f,"toggle").pipe(W(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(b(dr),m(({data:l})=>l)).pipe(w(l=>o.next(l)),_(()=>o.complete()),m(l=>$({ref:e},l)))}function ms(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=ye();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function mi(e,t){let r=new g,o=r.pipe(Z(),ie(!0));return r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),h(e,"click").pipe(W(o)).subscribe(n=>n.preventDefault()),ms(e,t).pipe(w(n=>r.next(n)),_(()=>r.complete()),m(n=>$({ref:e},n)))}function fi(e,{worker$:t,keyboard$:r}){let o=new g,n=Se("search-query"),i=O(h(n,"keydown"),h(n,"focus")).pipe(ve(se),m(()=>n.value),K());return o.pipe(He(i),m(([{suggest:s},p])=>{let c=p.split(/([\s-]+)/);if(s!=null&&s.length&&c[c.length-1]){let l=s[s.length-1];l.startsWith(c[c.length-1])&&(c[c.length-1]=l)}else c.length=0;return c})).subscribe(s=>e.innerHTML=s.join("").replace(/\s/g," ")),r.pipe(b(({mode:s})=>s==="search")).subscribe(s=>{switch(s.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(b(dr),m(({data:s})=>s)).pipe(w(s=>o.next(s)),_(()=>o.complete()),m(()=>({ref:e})))}function ui(e,{index$:t,keyboard$:r}){let o=xe();try{let n=ai(o.search,t),i=Se("search-query",e),a=Se("search-result",e);h(e,"click").pipe(b(({target:p})=>p instanceof Element&&!!p.closest("a"))).subscribe(()=>Je("search",!1)),r.pipe(b(({mode:p})=>p==="search")).subscribe(p=>{let c=Ie();switch(p.type){case"Enter":if(c===i){let l=new Map;for(let f of P(":first-child [href]",a)){let u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,d])=>d-u);f.click()}p.claim()}break;case"Escape":case"Tab":Je("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof c=="undefined")i.focus();else{let l=[i,...P(":not(details) > [href], summary, details[open] [href]",a)],f=Math.max(0,(Math.max(0,l.indexOf(c))+l.length+(p.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}p.claim();break;default:i!==Ie()&&i.focus()}}),r.pipe(b(({mode:p})=>p==="global")).subscribe(p=>{switch(p.type){case"f":case"s":case"/":i.focus(),i.select(),p.claim();break}});let s=pi(i,{worker$:n});return O(s,li(a,{worker$:n,query$:s})).pipe(Re(...ae("search-share",e).map(p=>mi(p,{query$:s})),...ae("search-suggest",e).map(p=>fi(p,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,Ye}}function di(e,{index$:t,location$:r}){return z([t,r.pipe(Q(ye()),b(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>ii(o.config)(n.searchParams.get("h"))),m(o=>{var a;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let s=i.nextNode();s;s=i.nextNode())if((a=s.parentElement)!=null&&a.offsetHeight){let p=s.textContent,c=o(p);c.length>p.length&&n.set(s,c)}for(let[s,p]of n){let{childNodes:c}=x("span",null,p);s.replaceWith(...Array.from(c))}return{ref:e,nodes:n}}))}function fs(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return z([r,t]).pipe(m(([{offset:i,height:a},{offset:{y:s}}])=>(a=a+Math.min(n,Math.max(0,s-i))-n,{height:a,locked:s>=i+n})),K((i,a)=>i.height===a.height&&i.locked===a.locked))}function Zr(e,o){var n=o,{header$:t}=n,r=so(n,["header$"]);let i=R(".md-sidebar__scrollwrap",e),{y:a}=Ve(i);return C(()=>{let s=new g,p=s.pipe(Z(),ie(!0)),c=s.pipe(Me(0,me));return 
c.pipe(re(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*a}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),c.pipe(Ae()).subscribe(()=>{for(let l of P(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:d}=ce(f);f.scrollTo({top:u-d/2})}}}),ue(P("label[tabindex]",e)).pipe(ne(l=>h(l,"click").pipe(ve(se),m(()=>l),W(p)))).subscribe(l=>{let f=R(`[id="${l.htmlFor}"]`);R(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),fs(e,r).pipe(w(l=>s.next(l)),_(()=>s.complete()),m(l=>$({ref:e},l)))})}function hi(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return st(je(`${r}/releases/latest`).pipe(de(()=>S),m(o=>({version:o.tag_name})),De({})),je(r).pipe(de(()=>S),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),De({}))).pipe(m(([o,n])=>$($({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return je(r).pipe(m(o=>({repositories:o.public_repos})),De({}))}}function bi(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return st(je(`${r}/releases/permalink/latest`).pipe(de(()=>S),m(({tag_name:o})=>({version:o})),De({})),je(r).pipe(de(()=>S),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),De({}))).pipe(m(([o,n])=>$($({},o),n)))}function vi(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return hi(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return bi(r,o)}return S}var us;function ds(e){return us||(us=C(()=>{let t=__md_get("__source",sessionStorage);if(t)return I(t);if(ae("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return S}return vi(e.href).pipe(w(o=>__md_set("__source",o,sessionStorage)))}).pipe(de(()=>S),b(t=>Object.keys(t).length>0),m(t=>({facts:t})),G(1)))}function gi(e){let t=R(":scope > :last-child",e);return C(()=>{let r=new g;return r.subscribe(({facts:o})=>{t.appendChild(_n(o)),t.classList.add("md-source__repository--active")}),ds(e).pipe(w(o=>r.next(o)),_(()=>r.complete()),m(o=>$({ref:e},o)))})}function hs(e,{viewport$:t,header$:r}){return ge(document.body).pipe(v(()=>mr(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),ee("hidden"))}function yi(e,t){return C(()=>{let r=new g;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(B("navigation.tabs.sticky")?I({hidden:!1}):hs(e,t)).pipe(w(o=>r.next(o)),_(()=>r.complete()),m(o=>$({ref:e},o)))})}function bs(e,{viewport$:t,header$:r}){let o=new Map,n=P(".md-nav__link",e);for(let s of n){let p=decodeURIComponent(s.hash.substring(1)),c=fe(`[id="${p}"]`);typeof c!="undefined"&&o.set(s,c)}let i=r.pipe(ee("height"),m(({height:s})=>{let p=Se("main"),c=R(":scope > :first-child",p);return s+.8*(c.offsetTop-p.offsetTop)}),pe());return ge(document.body).pipe(ee("height"),v(s=>C(()=>{let p=[];return I([...o].reduce((c,[l,f])=>{for(;p.length&&o.get(p[p.length-1]).tagName>=f.tagName;)p.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let d=f.offsetParent;for(;d;d=d.offsetParent)u+=d.offsetTop;return c.set([...p=[...p,l]].reverse(),u)},new Map))}).pipe(m(p=>new Map([...p].sort(([,c],[,l])=>c-l))),He(i),v(([p,c])=>t.pipe(Fr(([l,f],{offset:{y:u},size:d})=>{let y=u+d.height>=Math.floor(s.height);for(;f.length;){let[,L]=f[0];if(L-c=u&&!y)f=[l.pop(),...f];else 
break}return[l,f]},[[],[...p]]),K((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([s,p])=>({prev:s.map(([c])=>c),next:p.map(([c])=>c)})),Q({prev:[],next:[]}),Be(2,1),m(([s,p])=>s.prev.length{let i=new g,a=i.pipe(Z(),ie(!0));if(i.subscribe(({prev:s,next:p})=>{for(let[c]of p)c.classList.remove("md-nav__link--passed"),c.classList.remove("md-nav__link--active");for(let[c,[l]]of s.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",c===s.length-1)}),B("toc.follow")){let s=O(t.pipe(_e(1),m(()=>{})),t.pipe(_e(250),m(()=>"smooth")));i.pipe(b(({prev:p})=>p.length>0),He(o.pipe(ve(se))),re(s)).subscribe(([[{prev:p}],c])=>{let[l]=p[p.length-1];if(l.offsetHeight){let f=cr(l);if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:d}=ce(f);f.scrollTo({top:u-d/2,behavior:c})}}})}return B("navigation.tracking")&&t.pipe(W(a),ee("offset"),_e(250),Ce(1),W(n.pipe(Ce(1))),ct({delay:250}),re(i)).subscribe(([,{prev:s}])=>{let p=ye(),c=s[s.length-1];if(c&&c.length){let[l]=c,{hash:f}=new URL(l.href);p.hash!==f&&(p.hash=f,history.replaceState({},"",`${p}`))}else p.hash="",history.replaceState({},"",`${p}`)}),bs(e,{viewport$:t,header$:r}).pipe(w(s=>i.next(s)),_(()=>i.complete()),m(s=>$({ref:e},s)))})}function vs(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:a}})=>a),Be(2,1),m(([a,s])=>a>s&&s>0),K()),i=r.pipe(m(({active:a})=>a));return z([i,n]).pipe(m(([a,s])=>!(a&&s)),K(),W(o.pipe(Ce(1))),ie(!0),ct({delay:250}),m(a=>({hidden:a})))}function Ei(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new g,a=i.pipe(Z(),ie(!0));return i.subscribe({next({hidden:s}){e.hidden=s,s?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(W(a),ee("height")).subscribe(({height:s})=>{e.style.top=`${s+16}px`}),h(e,"click").subscribe(s=>{s.preventDefault(),window.scrollTo({top:0})}),vs(e,{viewport$:t,main$:o,target$:n}).pipe(w(s=>i.next(s)),_(()=>i.complete()),m(s=>$({ref:e},s)))}function wi({document$:e,viewport$:t}){e.pipe(v(()=>P(".md-ellipsis")),ne(r=>tt(r).pipe(W(e.pipe(Ce(1))),b(o=>o),m(()=>r),Te(1))),b(r=>r.offsetWidth{let o=r.innerText,n=r.closest("a")||r;return n.title=o,B("content.tooltips")?mt(n,{viewport$:t}).pipe(W(e.pipe(Ce(1))),_(()=>n.removeAttribute("title"))):S})).subscribe(),B("content.tooltips")&&e.pipe(v(()=>P(".md-status")),ne(r=>mt(r,{viewport$:t}))).subscribe()}function Ti({document$:e,tablet$:t}){e.pipe(v(()=>P(".md-toggle--indeterminate")),w(r=>{r.indeterminate=!0,r.checked=!1}),ne(r=>h(r,"change").pipe(Dr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),re(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function gs(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function Si({document$:e}){e.pipe(v(()=>P("[data-md-scrollfix]")),w(t=>t.removeAttribute("data-md-scrollfix")),b(gs),ne(t=>h(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function Oi({viewport$:e,tablet$:t}){z([ze("search"),t]).pipe(m(([r,o])=>r&&!o),v(r=>I(r).pipe(Ge(r?400:100))),re(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of 
Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function ys(){return location.protocol==="file:"?Tt(`${new URL("search/search_index.js",eo.base)}`).pipe(m(()=>__index),G(1)):je(new URL("search/search_index.json",eo.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var ot=Go(),Ut=sn(),Lt=ln(Ut),to=an(),Oe=gn(),hr=Pt("(min-width: 960px)"),Mi=Pt("(min-width: 1220px)"),_i=mn(),eo=xe(),Ai=document.forms.namedItem("search")?ys():Ye,ro=new g;Zn({alert$:ro});var oo=new g;B("navigation.instant")&&oi({location$:Ut,viewport$:Oe,progress$:oo}).subscribe(ot);var Li;((Li=eo.version)==null?void 0:Li.provider)==="mike"&&ci({document$:ot});O(Ut,Lt).pipe(Ge(125)).subscribe(()=>{Je("drawer",!1),Je("search",!1)});to.pipe(b(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=fe("link[rel=prev]");typeof t!="undefined"&<(t);break;case"n":case".":let r=fe("link[rel=next]");typeof r!="undefined"&<(r);break;case"Enter":let o=Ie();o instanceof HTMLLabelElement&&o.click()}});wi({viewport$:Oe,document$:ot});Ti({document$:ot,tablet$:hr});Si({document$:ot});Oi({viewport$:Oe,tablet$:hr});var rt=Kn(Se("header"),{viewport$:Oe}),Ft=ot.pipe(m(()=>Se("main")),v(e=>Gn(e,{viewport$:Oe,header$:rt})),G(1)),xs=O(...ae("consent").map(e=>En(e,{target$:Lt})),...ae("dialog").map(e=>qn(e,{alert$:ro})),...ae("palette").map(e=>Jn(e)),...ae("progress").map(e=>Xn(e,{progress$:oo})),...ae("search").map(e=>ui(e,{index$:Ai,keyboard$:to})),...ae("source").map(e=>gi(e))),Es=C(()=>O(...ae("announce").map(e=>xn(e)),...ae("content").map(e=>zn(e,{viewport$:Oe,target$:Lt,print$:_i})),...ae("content").map(e=>B("search.highlight")?di(e,{index$:Ai,location$:Ut}):S),...ae("header").map(e=>Yn(e,{viewport$:Oe,header$:rt,main$:Ft})),...ae("header-title").map(e=>Bn(e,{viewport$:Oe,header$:rt})),...ae("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Nr(Mi,()=>Zr(e,{viewport$:Oe,header$:rt,main$:Ft})):Nr(hr,()=>Zr(e,{viewport$:Oe,header$:rt,main$:Ft}))),...ae("tabs").map(e=>yi(e,{viewport$:Oe,header$:rt})),...ae("toc").map(e=>xi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Lt})),...ae("top").map(e=>Ei(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Lt})))),Ci=ot.pipe(v(()=>Es),Re(xs),G(1));Ci.subscribe();window.document$=ot;window.location$=Ut;window.target$=Lt;window.keyboard$=to;window.viewport$=Oe;window.tablet$=hr;window.screen$=Mi;window.print$=_i;window.alert$=ro;window.progress$=oo;window.component$=Ci;})(); +//# sourceMappingURL=bundle.83f73b43.min.js.map + diff --git a/assets/javascripts/bundle.83f73b43.min.js.map b/assets/javascripts/bundle.83f73b43.min.js.map new file mode 100644 index 00000000..fe920b7d --- /dev/null +++ b/assets/javascripts/bundle.83f73b43.min.js.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["node_modules/focus-visible/dist/focus-visible.js", "node_modules/escape-html/index.js", 
"node_modules/clipboard/dist/clipboard.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/tslib/tslib.es6.mjs", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", "node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/BehaviorSubject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", "node_modules/rxjs/src/internal/scheduler/QueueAction.ts", "node_modules/rxjs/src/internal/scheduler/QueueScheduler.ts", "node_modules/rxjs/src/internal/scheduler/queue.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", 
"node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", "node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounce.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", "node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", 
"node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", "src/templates/assets/javascripts/browser/element/offset/content/index.ts", "src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", "src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip2/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", "src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", 
"src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/findurl/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", "src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", "src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], + "sourcesContent": ["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n /**\n * Computes whether the given element should automatically trigger the\n * `focus-visible` class being added, i.e. 
whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. 
a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. 
mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. 
This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? 
html + str.substring(lastIndex, index)\n : html;\n}\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = /*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n try {\n return document.execCommand(type);\n } catch (err) {\n return false;\n }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n var selectedText = select_default()(target);\n command('cut');\n return selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n fakeElement.style.fontSize = '12pt'; // Reset box model\n\n fakeElement.style.border = '0';\n fakeElement.style.padding = '0';\n fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n fakeElement.style.position = 'absolute';\n fakeElement.style[isRTL ? 
'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n fakeElement.style.top = \"\".concat(yPosition, \"px\");\n fakeElement.setAttribute('readonly', '');\n fakeElement.value = value;\n return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n var fakeElement = createFakeElement(value);\n options.container.appendChild(fakeElement);\n var selectedText = select_default()(fakeElement);\n command('copy');\n fakeElement.remove();\n return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n var selectedText = '';\n\n if (typeof target === 'string') {\n selectedText = fakeCopyAction(target, options);\n } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n // If input type doesn't support `setSelectionRange`. Simulate it. https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n selectedText = fakeCopyAction(target.value, options);\n } else {\n selectedText = select_default()(target);\n command('copy');\n }\n\n return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n // Defines base properties passed from constructor.\n var _options$action = options.action,\n action = _options$action === void 0 ? 'copy' : _options$action,\n container = options.container,\n target = options.target,\n text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n if (action !== 'copy' && action !== 'cut') {\n throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n } // Sets the `target` property using an element that will be have its content copied.\n\n\n if (target !== undefined) {\n if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n if (action === 'copy' && target.hasAttribute('disabled')) {\n throw new Error('Invalid \"target\" attribute. Please use \"readonly\" instead of \"disabled\" attribute');\n }\n\n if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n throw new Error('Invalid \"target\" attribute. 
You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n }\n } else {\n throw new Error('Invalid \"target\" value, use a valid Element');\n }\n } // Define selection strategy based on `text` property.\n\n\n if (text) {\n return actions_copy(text, {\n container: container\n });\n } // Defines which selection strategy based on `target` property.\n\n\n if (target) {\n return action === 'cut' ? actions_cut(target) : actions_copy(target, {\n container: container\n });\n }\n};\n\n/* harmony default export */ var actions_default = (ClipboardActionDefault);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? 
Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n var attribute = \"data-clipboard-\".concat(suffix);\n\n if (!element.hasAttribute(attribute)) {\n return;\n }\n\n return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n _inherits(Clipboard, _Emitter);\n\n var _super = _createSuper(Clipboard);\n\n /**\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n * @param {Object} options\n */\n function Clipboard(trigger, options) {\n var _this;\n\n _classCallCheck(this, Clipboard);\n\n _this = _super.call(this);\n\n _this.resolveOptions(options);\n\n _this.listenClick(trigger);\n\n return _this;\n }\n /**\n * Defines if attributes would be resolved using internal setter functions\n * or custom functions that were passed in the constructor.\n * @param {Object} options\n */\n\n\n _createClass(Clipboard, [{\n key: \"resolveOptions\",\n value: function resolveOptions() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n this.container = clipboard_typeof(options.container) === 'object' ? options.container : document.body;\n }\n /**\n * Adds a click event listener to the passed trigger.\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n */\n\n }, {\n key: \"listenClick\",\n value: function listenClick(trigger) {\n var _this2 = this;\n\n this.listener = listen_default()(trigger, 'click', function (e) {\n return _this2.onClick(e);\n });\n }\n /**\n * Defines a new `ClipboardAction` on each click event.\n * @param {Event} e\n */\n\n }, {\n key: \"onClick\",\n value: function onClick(e) {\n var trigger = e.delegateTarget || e.currentTarget;\n var action = this.action(trigger) || 'copy';\n var text = actions_default({\n action: action,\n container: this.container,\n target: this.target(trigger),\n text: this.text(trigger)\n }); // Fires an event based on the copy operation result.\n\n this.emit(text ? 
'success' : 'error', {\n action: action,\n text: text,\n trigger: trigger,\n clearSelection: function clearSelection() {\n if (trigger) {\n trigger.focus();\n }\n\n window.getSelection().removeAllRanges();\n }\n });\n }\n /**\n * Default `action` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultAction\",\n value: function defaultAction(trigger) {\n return getAttributeValue('action', trigger);\n }\n /**\n * Default `target` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultTarget\",\n value: function defaultTarget(trigger) {\n var selector = getAttributeValue('target', trigger);\n\n if (selector) {\n return document.querySelector(selector);\n }\n }\n /**\n * Allow fire programmatically a copy action\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @returns Text copied.\n */\n\n }, {\n key: \"defaultText\",\n\n /**\n * Default `text` lookup function.\n * @param {Element} trigger\n */\n value: function defaultText(trigger) {\n return getAttributeValue('text', trigger);\n }\n /**\n * Destroy lifecycle.\n */\n\n }, {\n key: \"destroy\",\n value: function destroy() {\n this.listener.destroy();\n }\n }], [{\n key: \"copy\",\n value: function copy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n return actions_copy(target, options);\n }\n /**\n * Allow fire programmatically a cut action\n * @param {String|HTMLElement} target\n * @returns Text cutted.\n */\n\n }, {\n key: \"cut\",\n value: function cut(target) {\n return actions_cut(target);\n }\n /**\n * Returns the support of the given action, or all actions if no action is\n * given.\n * @param {String} [action]\n */\n\n }, {\n key: \"isSupported\",\n value: function isSupported() {\n var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n var actions = typeof action === 'string' ? 
[action] : action;\n var support = !!document.queryCommandSupported;\n actions.forEach(function (action) {\n support = support && !!document.queryCommandSupported(action);\n });\n return support;\n }\n }]);\n\n return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n var proto = Element.prototype;\n\n proto.matches = proto.matchesSelector ||\n proto.mozMatchesSelector ||\n proto.msMatchesSelector ||\n proto.oMatchesSelector ||\n proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n if (typeof element.matches === 'function' &&\n element.matches(selector)) {\n return element;\n }\n element = element.parentNode;\n }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n var listenerFn = listener.apply(this, arguments);\n\n element.addEventListener(type, listenerFn, useCapture);\n\n return {\n destroy: function() {\n element.removeEventListener(type, listenerFn, useCapture);\n }\n }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} [elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n // Handle the regular Element usage\n if (typeof elements.addEventListener === 'function') {\n return _delegate.apply(null, arguments);\n }\n\n // Handle Element-less usage, it defaults to global delegation\n if (typeof type === 'function') {\n // Use `document` as the first parameter, then apply arguments\n // This is a short way to .unshift `arguments` without running into deoptimizations\n return _delegate.bind(null, document).apply(null, arguments);\n }\n\n // Handle Selector-based usage\n if (typeof elements === 'string') {\n elements = document.querySelectorAll(elements);\n }\n\n // Handle Array-like based usage\n return Array.prototype.map.call(elements, function (element) {\n return _delegate(element, selector, type, callback, useCapture);\n });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n return function(e) {\n e.delegateTarget = closest(e.target, selector);\n\n if (e.delegateTarget) {\n callback.call(element, e);\n }\n }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n return value !== undefined\n && 
value instanceof HTMLElement\n && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return value !== undefined\n && (type === '[object NodeList]' || type === '[object HTMLCollection]')\n && ('length' in value)\n && (value.length === 0 || exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n return typeof value === 'string'\n || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n if (!target && !type && !callback) {\n throw new Error('Missing required arguments');\n }\n\n if (!is.string(type)) {\n throw new TypeError('Second argument must be a String');\n }\n\n if (!is.fn(callback)) {\n throw new TypeError('Third argument must be a Function');\n }\n\n if (is.node(target)) {\n return listenNode(target, type, callback);\n }\n else if (is.nodeList(target)) {\n return listenNodeList(target, type, callback);\n }\n else if (is.string(target)) {\n return listenSelector(target, type, callback);\n }\n else {\n throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n node.addEventListener(type, callback);\n\n return {\n destroy: function() {\n node.removeEventListener(type, callback);\n }\n }\n}\n\n/**\n * Add an event listener to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.addEventListener(type, callback);\n });\n\n return {\n destroy: function() {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.removeEventListener(type, callback);\n });\n }\n }\n}\n\n/**\n * Add an event listener to a selector\n * and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n var selectedText;\n\n if (element.nodeName === 'SELECT') {\n element.focus();\n\n selectedText = element.value;\n }\n else if (element.nodeName === 'INPUT' || element.nodeName 
=== 'TEXTAREA') {\n var isReadOnly = element.hasAttribute('readonly');\n\n if (!isReadOnly) {\n element.setAttribute('readonly', '');\n }\n\n element.select();\n element.setSelectionRange(0, element.value.length);\n\n if (!isReadOnly) {\n element.removeAttribute('readonly');\n }\n\n selectedText = element.value;\n }\n else {\n if (element.hasAttribute('contenteditable')) {\n element.focus();\n }\n\n var selection = window.getSelection();\n var range = document.createRange();\n\n range.selectNodeContents(element);\n selection.removeAllRanges();\n selection.addRange(range);\n\n selectedText = selection.toString();\n }\n\n return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n // Keep this empty so it's easier to inherit from\n // (via https://github.com/lipsmack from https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n on: function (name, callback, ctx) {\n var e = this.e || (this.e = {});\n\n (e[name] || (e[name] = [])).push({\n fn: callback,\n ctx: ctx\n });\n\n return this;\n },\n\n once: function (name, callback, ctx) {\n var self = this;\n function listener () {\n self.off(name, listener);\n callback.apply(ctx, arguments);\n };\n\n listener._ = callback\n return this.on(name, listener, ctx);\n },\n\n emit: function (name) {\n var data = [].slice.call(arguments, 1);\n var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n var i = 0;\n var len = evtArr.length;\n\n for (i; i < len; i++) {\n evtArr[i].fn.apply(evtArr[i].ctx, data);\n }\n\n return this;\n },\n\n off: function (name, callback) {\n var e = this.e || (this.e = {});\n var evts = e[name];\n var liveEvents = [];\n\n if (evts && callback) {\n for (var i = 0, len = evts.length; i < len; i++) {\n if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n liveEvents.push(evts[i]);\n }\n }\n\n // Remove event from queue to prevent memory leak\n // Suggested by https://github.com/lazd\n // Ref: https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n (liveEvents.length)\n ? 
e[name] = liveEvents\n : delete e[name];\n\n return this;\n }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) {\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(686);\n/******/ })()\n.default;\n});", "/*\n * Copyright (c) 2016-2024 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF 
ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"focus-visible\"\n\nimport {\n EMPTY,\n NEVER,\n Observable,\n Subject,\n defer,\n delay,\n filter,\n map,\n merge,\n mergeWith,\n shareReplay,\n switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n at,\n getActiveElement,\n getOptionalElement,\n requestJSON,\n setLocation,\n setToggle,\n watchDocument,\n watchKeyboard,\n watchLocation,\n watchLocationTarget,\n watchMedia,\n watchPrint,\n watchScript,\n watchViewport\n} from \"./browser\"\nimport {\n getComponentElement,\n getComponentElements,\n mountAnnounce,\n mountBackToTop,\n mountConsent,\n mountContent,\n mountDialog,\n mountHeader,\n mountHeaderTitle,\n mountPalette,\n mountProgress,\n mountSearch,\n mountSearchHiglight,\n mountSidebar,\n mountSource,\n mountTableOfContents,\n mountTabs,\n watchHeader,\n watchMain\n} from \"./components\"\nimport {\n SearchIndex,\n setupClipboardJS,\n setupInstantNavigation,\n setupVersionSelector\n} from \"./integrations\"\nimport {\n patchEllipsis,\n patchIndeterminate,\n patchScrollfix,\n patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Functions - @todo refactor\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch search index\n *\n * @returns Search index observable\n */\nfunction fetchSearchIndex(): Observable {\n if (location.protocol === \"file:\") {\n return watchScript(\n `${new URL(\"search/search_index.js\", config.base)}`\n )\n .pipe(\n // @ts-ignore - @todo fix typings\n map(() => __index),\n shareReplay(1)\n )\n } else {\n return requestJSON(\n new URL(\"search/search_index.json\", config.base)\n )\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Application\n * ------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$ = watchLocationTarget(location$)\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$ = watchMedia(\"(min-width: 960px)\")\nconst screen$ = watchMedia(\"(min-width: 1220px)\")\nconst print$ = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n ? 
fetchSearchIndex()\n : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject()\nsetupClipboardJS({ alert$ })\n\n/* Set up progress indicator */\nconst progress$ = new Subject()\n\n/* Set up instant navigation, if enabled */\nif (feature(\"navigation.instant\"))\n setupInstantNavigation({ location$, viewport$, progress$ })\n .subscribe(document$)\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n setupVersionSelector({ document$ })\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n .pipe(\n delay(125)\n )\n .subscribe(() => {\n setToggle(\"drawer\", false)\n setToggle(\"search\", false)\n })\n\n/* Set up global keyboard handlers */\nkeyboard$\n .pipe(\n filter(({ mode }) => mode === \"global\")\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Go to previous page */\n case \"p\":\n case \",\":\n const prev = getOptionalElement(\"link[rel=prev]\")\n if (typeof prev !== \"undefined\")\n setLocation(prev)\n break\n\n /* Go to next page */\n case \"n\":\n case \".\":\n const next = getOptionalElement(\"link[rel=next]\")\n if (typeof next !== \"undefined\")\n setLocation(next)\n break\n\n /* Expand navigation, see https://bit.ly/3ZjG5io */\n case \"Enter\":\n const active = getActiveElement()\n if (active instanceof HTMLLabelElement)\n active.click()\n }\n })\n\n/* Set up patches */\npatchEllipsis({ viewport$, document$ })\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n .pipe(\n map(() => getComponentElement(\"main\")),\n switchMap(el => watchMain(el, { viewport$, header$ })),\n shareReplay(1)\n )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n /* Consent */\n ...getComponentElements(\"consent\")\n .map(el => mountConsent(el, { target$ })),\n\n /* Dialog */\n ...getComponentElements(\"dialog\")\n .map(el => mountDialog(el, { alert$ })),\n\n /* Color palette */\n ...getComponentElements(\"palette\")\n .map(el => mountPalette(el)),\n\n /* Progress bar */\n ...getComponentElements(\"progress\")\n .map(el => mountProgress(el, { progress$ })),\n\n /* Search */\n ...getComponentElements(\"search\")\n .map(el => mountSearch(el, { index$, keyboard$ })),\n\n /* Repository information */\n ...getComponentElements(\"source\")\n .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n /* Announcement bar */\n ...getComponentElements(\"announce\")\n .map(el => mountAnnounce(el)),\n\n /* Content */\n ...getComponentElements(\"content\")\n .map(el => mountContent(el, { viewport$, target$, print$ })),\n\n /* Search highlighting */\n ...getComponentElements(\"content\")\n .map(el => feature(\"search.highlight\")\n ? mountSearchHiglight(el, { index$, location$ })\n : EMPTY\n ),\n\n /* Header */\n ...getComponentElements(\"header\")\n .map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n /* Header title */\n ...getComponentElements(\"header-title\")\n .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n /* Sidebar */\n ...getComponentElements(\"sidebar\")\n .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n ? 
at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n ),\n\n /* Navigation tabs */\n ...getComponentElements(\"tabs\")\n .map(el => mountTabs(el, { viewport$, header$ })),\n\n /* Table of contents */\n ...getComponentElements(\"toc\")\n .map(el => mountTableOfContents(el, {\n viewport$, header$, main$, target$\n })),\n\n /* Back-to-top button */\n ...getComponentElements(\"top\")\n .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n .pipe(\n switchMap(() => content$),\n mergeWith(control$),\n shareReplay(1)\n )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$ = document$ /* Document observable */\nwindow.location$ = location$ /* Location subject */\nwindow.target$ = target$ /* Location target observable */\nwindow.keyboard$ = keyboard$ /* Keyboard observable */\nwindow.viewport$ = viewport$ /* Viewport observable */\nwindow.tablet$ = tablet$ /* Media tablet observable */\nwindow.screen$ = screen$ /* Media screen observable */\nwindow.print$ = print$ /* Media print observable */\nwindow.alert$ = alert$ /* Alert subject */\nwindow.progress$ = progress$ /* Progress indicator subject */\nwindow.component$ = component$ /* Component observable */\n", "/******************************************************************************\nCopyright (c) Microsoft Corporation.\n\nPermission to use, copy, modify, and/or distribute this software for any\npurpose with or without fee is hereby granted.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\nAND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\nPERFORMANCE OF THIS SOFTWARE.\n***************************************************************************** */\n/* global Reflect, Promise, SuppressedError, Symbol, Iterator */\n\nvar extendStatics = function(d, b) {\n extendStatics = Object.setPrototypeOf ||\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\n function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\n return extendStatics(d, b);\n};\n\nexport function __extends(d, b) {\n if (typeof b !== \"function\" && b !== null)\n throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\n extendStatics(d, b);\n function __() { this.constructor = d; }\n d.prototype = b === null ? 
Object.create(b) : (__.prototype = b.prototype, new __());\n}\n\nexport var __assign = function() {\n __assign = Object.assign || function __assign(t) {\n for (var s, i = 1, n = arguments.length; i < n; i++) {\n s = arguments[i];\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\n }\n return t;\n }\n return __assign.apply(this, arguments);\n}\n\nexport function __rest(s, e) {\n var t = {};\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\n t[p] = s[p];\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\n t[p[i]] = s[p[i]];\n }\n return t;\n}\n\nexport function __decorate(decorators, target, key, desc) {\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\n return c > 3 && r && Object.defineProperty(target, key, r), r;\n}\n\nexport function __param(paramIndex, decorator) {\n return function (target, key) { decorator(target, key, paramIndex); }\n}\n\nexport function __esDecorate(ctor, descriptorIn, decorators, contextIn, initializers, extraInitializers) {\n function accept(f) { if (f !== void 0 && typeof f !== \"function\") throw new TypeError(\"Function expected\"); return f; }\n var kind = contextIn.kind, key = kind === \"getter\" ? \"get\" : kind === \"setter\" ? \"set\" : \"value\";\n var target = !descriptorIn && ctor ? contextIn[\"static\"] ? ctor : ctor.prototype : null;\n var descriptor = descriptorIn || (target ? Object.getOwnPropertyDescriptor(target, contextIn.name) : {});\n var _, done = false;\n for (var i = decorators.length - 1; i >= 0; i--) {\n var context = {};\n for (var p in contextIn) context[p] = p === \"access\" ? {} : contextIn[p];\n for (var p in contextIn.access) context.access[p] = contextIn.access[p];\n context.addInitializer = function (f) { if (done) throw new TypeError(\"Cannot add initializers after decoration has completed\"); extraInitializers.push(accept(f || null)); };\n var result = (0, decorators[i])(kind === \"accessor\" ? { get: descriptor.get, set: descriptor.set } : descriptor[key], context);\n if (kind === \"accessor\") {\n if (result === void 0) continue;\n if (result === null || typeof result !== \"object\") throw new TypeError(\"Object expected\");\n if (_ = accept(result.get)) descriptor.get = _;\n if (_ = accept(result.set)) descriptor.set = _;\n if (_ = accept(result.init)) initializers.unshift(_);\n }\n else if (_ = accept(result)) {\n if (kind === \"field\") initializers.unshift(_);\n else descriptor[key] = _;\n }\n }\n if (target) Object.defineProperty(target, contextIn.name, descriptor);\n done = true;\n};\n\nexport function __runInitializers(thisArg, initializers, value) {\n var useValue = arguments.length > 2;\n for (var i = 0; i < initializers.length; i++) {\n value = useValue ? initializers[i].call(thisArg, value) : initializers[i].call(thisArg);\n }\n return useValue ? value : void 0;\n};\n\nexport function __propKey(x) {\n return typeof x === \"symbol\" ? 
x : \"\".concat(x);\n};\n\nexport function __setFunctionName(f, name, prefix) {\n if (typeof name === \"symbol\") name = name.description ? \"[\".concat(name.description, \"]\") : \"\";\n return Object.defineProperty(f, \"name\", { configurable: true, value: prefix ? \"\".concat(prefix, \" \", name) : name });\n};\n\nexport function __metadata(metadataKey, metadataValue) {\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\n}\n\nexport function __awaiter(thisArg, _arguments, P, generator) {\n function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }\n return new (P || (P = Promise))(function (resolve, reject) {\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\n function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\n step((generator = generator.apply(thisArg, _arguments || [])).next());\n });\n}\n\nexport function __generator(thisArg, body) {\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g = Object.create((typeof Iterator === \"function\" ? Iterator : Object).prototype);\n return g.next = verb(0), g[\"throw\"] = verb(1), g[\"return\"] = verb(2), typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\n function verb(n) { return function (v) { return step([n, v]); }; }\n function step(op) {\n if (f) throw new TypeError(\"Generator is already executing.\");\n while (g && (g = 0, op[0] && (_ = 0)), _) try {\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\n if (y = 0, t) op = [op[0] & 2, t.value];\n switch (op[0]) {\n case 0: case 1: t = op; break;\n case 4: _.label++; return { value: op[1], done: false };\n case 5: _.label++; y = op[1]; op = [0]; continue;\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\n default:\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\n if (t[2]) _.ops.pop();\n _.trys.pop(); continue;\n }\n op = body.call(thisArg, _);\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\n }\n}\n\nexport var __createBinding = Object.create ? (function(o, m, k, k2) {\n if (k2 === undefined) k2 = k;\n var desc = Object.getOwnPropertyDescriptor(m, k);\n if (!desc || (\"get\" in desc ? 
!m.__esModule : desc.writable || desc.configurable)) {\n desc = { enumerable: true, get: function() { return m[k]; } };\n }\n Object.defineProperty(o, k2, desc);\n}) : (function(o, m, k, k2) {\n if (k2 === undefined) k2 = k;\n o[k2] = m[k];\n});\n\nexport function __exportStar(m, o) {\n for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\n}\n\nexport function __values(o) {\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\n if (m) return m.call(o);\n if (o && typeof o.length === \"number\") return {\n next: function () {\n if (o && i >= o.length) o = void 0;\n return { value: o && o[i++], done: !o };\n }\n };\n throw new TypeError(s ? \"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\n}\n\nexport function __read(o, n) {\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\n if (!m) return o;\n var i = m.call(o), r, ar = [], e;\n try {\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\n }\n catch (error) { e = { error: error }; }\n finally {\n try {\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\n }\n finally { if (e) throw e.error; }\n }\n return ar;\n}\n\n/** @deprecated */\nexport function __spread() {\n for (var ar = [], i = 0; i < arguments.length; i++)\n ar = ar.concat(__read(arguments[i]));\n return ar;\n}\n\n/** @deprecated */\nexport function __spreadArrays() {\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\n r[k] = a[j];\n return r;\n}\n\nexport function __spreadArray(to, from, pack) {\n if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\n if (ar || !(i in from)) {\n if (!ar) ar = Array.prototype.slice.call(from, 0, i);\n ar[i] = from[i];\n }\n }\n return to.concat(ar || Array.prototype.slice.call(from));\n}\n\nexport function __await(v) {\n return this instanceof __await ? (this.v = v, this) : new __await(v);\n}\n\nexport function __asyncGenerator(thisArg, _arguments, generator) {\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\n return i = Object.create((typeof AsyncIterator === \"function\" ? AsyncIterator : Object).prototype), verb(\"next\"), verb(\"throw\"), verb(\"return\", awaitReturn), i[Symbol.asyncIterator] = function () { return this; }, i;\n function awaitReturn(f) { return function (v) { return Promise.resolve(v).then(f, reject); }; }\n function verb(n, f) { if (g[n]) { i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; if (f) i[n] = f(i[n]); } }\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\n function fulfill(value) { resume(\"next\", value); }\n function reject(value) { resume(\"throw\", value); }\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\n}\n\nexport function __asyncDelegator(o) {\n var i, p;\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? 
{ value: __await(o[n](v)), done: false } : f ? f(v) : v; } : f; }\n}\n\nexport function __asyncValues(o) {\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\n var m = o[Symbol.asyncIterator], i;\n return m ? m.call(o) : (o = typeof __values === \"function\" ? __values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\n}\n\nexport function __makeTemplateObject(cooked, raw) {\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\n return cooked;\n};\n\nvar __setModuleDefault = Object.create ? (function(o, v) {\n Object.defineProperty(o, \"default\", { enumerable: true, value: v });\n}) : function(o, v) {\n o[\"default\"] = v;\n};\n\nexport function __importStar(mod) {\n if (mod && mod.__esModule) return mod;\n var result = {};\n if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\n __setModuleDefault(result, mod);\n return result;\n}\n\nexport function __importDefault(mod) {\n return (mod && mod.__esModule) ? mod : { default: mod };\n}\n\nexport function __classPrivateFieldGet(receiver, state, kind, f) {\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\n return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? f.value : state.get(receiver);\n}\n\nexport function __classPrivateFieldSet(receiver, state, value, kind, f) {\n if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\n return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\n}\n\nexport function __classPrivateFieldIn(state, receiver) {\n if (receiver === null || (typeof receiver !== \"object\" && typeof receiver !== \"function\")) throw new TypeError(\"Cannot use 'in' operator on non-object\");\n return typeof state === \"function\" ? 
receiver === state : state.has(receiver);\n}\n\nexport function __addDisposableResource(env, value, async) {\n if (value !== null && value !== void 0) {\n if (typeof value !== \"object\" && typeof value !== \"function\") throw new TypeError(\"Object expected.\");\n var dispose, inner;\n if (async) {\n if (!Symbol.asyncDispose) throw new TypeError(\"Symbol.asyncDispose is not defined.\");\n dispose = value[Symbol.asyncDispose];\n }\n if (dispose === void 0) {\n if (!Symbol.dispose) throw new TypeError(\"Symbol.dispose is not defined.\");\n dispose = value[Symbol.dispose];\n if (async) inner = dispose;\n }\n if (typeof dispose !== \"function\") throw new TypeError(\"Object not disposable.\");\n if (inner) dispose = function() { try { inner.call(this); } catch (e) { return Promise.reject(e); } };\n env.stack.push({ value: value, dispose: dispose, async: async });\n }\n else if (async) {\n env.stack.push({ async: true });\n }\n return value;\n}\n\nvar _SuppressedError = typeof SuppressedError === \"function\" ? SuppressedError : function (error, suppressed, message) {\n var e = new Error(message);\n return e.name = \"SuppressedError\", e.error = error, e.suppressed = suppressed, e;\n};\n\nexport function __disposeResources(env) {\n function fail(e) {\n env.error = env.hasError ? new _SuppressedError(e, env.error, \"An error was suppressed during disposal.\") : e;\n env.hasError = true;\n }\n var r, s = 0;\n function next() {\n while (r = env.stack.pop()) {\n try {\n if (!r.async && s === 1) return s = 0, env.stack.push(r), Promise.resolve().then(next);\n if (r.dispose) {\n var result = r.dispose.call(r.value);\n if (r.async) return s |= 2, Promise.resolve(result).then(next, function(e) { fail(e); return next(); });\n }\n else s |= 1;\n }\n catch (e) {\n fail(e);\n }\n }\n if (s === 1) return env.hasError ? Promise.reject(env.error) : Promise.resolve();\n if (env.hasError) throw env.error;\n }\n return next();\n}\n\nexport default {\n __extends,\n __assign,\n __rest,\n __decorate,\n __param,\n __metadata,\n __awaiter,\n __generator,\n __createBinding,\n __exportStar,\n __values,\n __read,\n __spread,\n __spreadArrays,\n __spreadArray,\n __await,\n __asyncGenerator,\n __asyncDelegator,\n __asyncValues,\n __makeTemplateObject,\n __importStar,\n __importDefault,\n __classPrivateFieldGet,\n __classPrivateFieldSet,\n __classPrivateFieldIn,\n __addDisposableResource,\n __disposeResources,\n};\n", "/**\n * Returns true if the object is a function.\n * @param value The value to check\n */\nexport function isFunction(value: any): value is (...args: any[]) => any {\n return typeof value === 'function';\n}\n", "/**\n * Used to create Error subclasses until the community moves away from ES5.\n *\n * This is because compiling from TypeScript down to ES5 has issues with subclassing Errors\n * as well as other built-in types: https://github.com/Microsoft/TypeScript/issues/12123\n *\n * @param createImpl A factory function to create the actual constructor implementation. 
The returned\n * function should be a named function that calls `_super` internally.\n */\nexport function createErrorClass(createImpl: (_super: any) => any): T {\n const _super = (instance: any) => {\n Error.call(instance);\n instance.stack = new Error().stack;\n };\n\n const ctorFunc = createImpl(_super);\n ctorFunc.prototype = Object.create(Error.prototype);\n ctorFunc.prototype.constructor = ctorFunc;\n return ctorFunc;\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface UnsubscriptionError extends Error {\n readonly errors: any[];\n}\n\nexport interface UnsubscriptionErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (errors: any[]): UnsubscriptionError;\n}\n\n/**\n * An error thrown when one or more errors have occurred during the\n * `unsubscribe` of a {@link Subscription}.\n */\nexport const UnsubscriptionError: UnsubscriptionErrorCtor = createErrorClass(\n (_super) =>\n function UnsubscriptionErrorImpl(this: any, errors: (Error | string)[]) {\n _super(this);\n this.message = errors\n ? `${errors.length} errors occurred during unsubscription:\n${errors.map((err, i) => `${i + 1}) ${err.toString()}`).join('\\n ')}`\n : '';\n this.name = 'UnsubscriptionError';\n this.errors = errors;\n }\n);\n", "/**\n * Removes an item from an array, mutating it.\n * @param arr The array to remove the item from\n * @param item The item to remove\n */\nexport function arrRemove(arr: T[] | undefined | null, item: T) {\n if (arr) {\n const index = arr.indexOf(item);\n 0 <= index && arr.splice(index, 1);\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { UnsubscriptionError } from './util/UnsubscriptionError';\nimport { SubscriptionLike, TeardownLogic, Unsubscribable } from './types';\nimport { arrRemove } from './util/arrRemove';\n\n/**\n * Represents a disposable resource, such as the execution of an Observable. A\n * Subscription has one important method, `unsubscribe`, that takes no argument\n * and just disposes the resource held by the subscription.\n *\n * Additionally, subscriptions may be grouped together through the `add()`\n * method, which will attach a child Subscription to the current Subscription.\n * When a Subscription is unsubscribed, all its children (and its grandchildren)\n * will be unsubscribed as well.\n *\n * @class Subscription\n */\nexport class Subscription implements SubscriptionLike {\n /** @nocollapse */\n public static EMPTY = (() => {\n const empty = new Subscription();\n empty.closed = true;\n return empty;\n })();\n\n /**\n * A flag to indicate whether this Subscription has already been unsubscribed.\n */\n public closed = false;\n\n private _parentage: Subscription[] | Subscription | null = null;\n\n /**\n * The list of registered finalizers to execute upon unsubscription. Adding and removing from this\n * list occurs in the {@link #add} and {@link #remove} methods.\n */\n private _finalizers: Exclude[] | null = null;\n\n /**\n * @param initialTeardown A function executed first as part of the finalization\n * process that is kicked off when {@link #unsubscribe} is called.\n */\n constructor(private initialTeardown?: () => void) {}\n\n /**\n * Disposes the resources held by the subscription. 
May, for instance, cancel\n * an ongoing Observable execution or cancel any other type of work that\n * started when the Subscription was created.\n * @return {void}\n */\n unsubscribe(): void {\n let errors: any[] | undefined;\n\n if (!this.closed) {\n this.closed = true;\n\n // Remove this from it's parents.\n const { _parentage } = this;\n if (_parentage) {\n this._parentage = null;\n if (Array.isArray(_parentage)) {\n for (const parent of _parentage) {\n parent.remove(this);\n }\n } else {\n _parentage.remove(this);\n }\n }\n\n const { initialTeardown: initialFinalizer } = this;\n if (isFunction(initialFinalizer)) {\n try {\n initialFinalizer();\n } catch (e) {\n errors = e instanceof UnsubscriptionError ? e.errors : [e];\n }\n }\n\n const { _finalizers } = this;\n if (_finalizers) {\n this._finalizers = null;\n for (const finalizer of _finalizers) {\n try {\n execFinalizer(finalizer);\n } catch (err) {\n errors = errors ?? [];\n if (err instanceof UnsubscriptionError) {\n errors = [...errors, ...err.errors];\n } else {\n errors.push(err);\n }\n }\n }\n }\n\n if (errors) {\n throw new UnsubscriptionError(errors);\n }\n }\n }\n\n /**\n * Adds a finalizer to this subscription, so that finalization will be unsubscribed/called\n * when this subscription is unsubscribed. If this subscription is already {@link #closed},\n * because it has already been unsubscribed, then whatever finalizer is passed to it\n * will automatically be executed (unless the finalizer itself is also a closed subscription).\n *\n * Closed Subscriptions cannot be added as finalizers to any subscription. Adding a closed\n * subscription to a any subscription will result in no operation. (A noop).\n *\n * Adding a subscription to itself, or adding `null` or `undefined` will not perform any\n * operation at all. (A noop).\n *\n * `Subscription` instances that are added to this instance will automatically remove themselves\n * if they are unsubscribed. Functions and {@link Unsubscribable} objects that you wish to remove\n * will need to be removed manually with {@link #remove}\n *\n * @param teardown The finalization logic to add to this subscription.\n */\n add(teardown: TeardownLogic): void {\n // Only add the finalizer if it's not undefined\n // and don't add a subscription to itself.\n if (teardown && teardown !== this) {\n if (this.closed) {\n // If this subscription is already closed,\n // execute whatever finalizer is handed to it automatically.\n execFinalizer(teardown);\n } else {\n if (teardown instanceof Subscription) {\n // We don't add closed subscriptions, and we don't add the same subscription\n // twice. Subscription unsubscribe is idempotent.\n if (teardown.closed || teardown._hasParent(this)) {\n return;\n }\n teardown._addParent(this);\n }\n (this._finalizers = this._finalizers ?? 
[]).push(teardown);\n }\n }\n }\n\n /**\n * Checks to see if a this subscription already has a particular parent.\n * This will signal that this subscription has already been added to the parent in question.\n * @param parent the parent to check for\n */\n private _hasParent(parent: Subscription) {\n const { _parentage } = this;\n return _parentage === parent || (Array.isArray(_parentage) && _parentage.includes(parent));\n }\n\n /**\n * Adds a parent to this subscription so it can be removed from the parent if it\n * unsubscribes on it's own.\n *\n * NOTE: THIS ASSUMES THAT {@link _hasParent} HAS ALREADY BEEN CHECKED.\n * @param parent The parent subscription to add\n */\n private _addParent(parent: Subscription) {\n const { _parentage } = this;\n this._parentage = Array.isArray(_parentage) ? (_parentage.push(parent), _parentage) : _parentage ? [_parentage, parent] : parent;\n }\n\n /**\n * Called on a child when it is removed via {@link #remove}.\n * @param parent The parent to remove\n */\n private _removeParent(parent: Subscription) {\n const { _parentage } = this;\n if (_parentage === parent) {\n this._parentage = null;\n } else if (Array.isArray(_parentage)) {\n arrRemove(_parentage, parent);\n }\n }\n\n /**\n * Removes a finalizer from this subscription that was previously added with the {@link #add} method.\n *\n * Note that `Subscription` instances, when unsubscribed, will automatically remove themselves\n * from every other `Subscription` they have been added to. This means that using the `remove` method\n * is not a common thing and should be used thoughtfully.\n *\n * If you add the same finalizer instance of a function or an unsubscribable object to a `Subscription` instance\n * more than once, you will need to call `remove` the same number of times to remove all instances.\n *\n * All finalizer instances are removed to free up memory upon unsubscription.\n *\n * @param teardown The finalizer to remove from this subscription\n */\n remove(teardown: Exclude): void {\n const { _finalizers } = this;\n _finalizers && arrRemove(_finalizers, teardown);\n\n if (teardown instanceof Subscription) {\n teardown._removeParent(this);\n }\n }\n}\n\nexport const EMPTY_SUBSCRIPTION = Subscription.EMPTY;\n\nexport function isSubscription(value: any): value is Subscription {\n return (\n value instanceof Subscription ||\n (value && 'closed' in value && isFunction(value.remove) && isFunction(value.add) && isFunction(value.unsubscribe))\n );\n}\n\nfunction execFinalizer(finalizer: Unsubscribable | (() => void)) {\n if (isFunction(finalizer)) {\n finalizer();\n } else {\n finalizer.unsubscribe();\n }\n}\n", "import { Subscriber } from './Subscriber';\nimport { ObservableNotification } from './types';\n\n/**\n * The {@link GlobalConfig} object for RxJS. It is used to configure things\n * like how to react on unhandled errors.\n */\nexport const config: GlobalConfig = {\n onUnhandledError: null,\n onStoppedNotification: null,\n Promise: undefined,\n useDeprecatedSynchronousErrorHandling: false,\n useDeprecatedNextContext: false,\n};\n\n/**\n * The global configuration object for RxJS, used to configure things\n * like how to react on unhandled errors. Accessible via {@link config}\n * object.\n */\nexport interface GlobalConfig {\n /**\n * A registration point for unhandled errors from RxJS. These are errors that\n * cannot were not handled by consuming code in the usual subscription path. 
For\n * example, if you have this configured, and you subscribe to an observable without\n * providing an error handler, errors from that subscription will end up here. This\n * will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onUnhandledError: ((err: any) => void) | null;\n\n /**\n * A registration point for notifications that cannot be sent to subscribers because they\n * have completed, errored or have been explicitly unsubscribed. By default, next, complete\n * and error notifications sent to stopped subscribers are noops. However, sometimes callers\n * might want a different behavior. For example, with sources that attempt to report errors\n * to stopped subscribers, a caller can configure RxJS to throw an unhandled error instead.\n * This will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onStoppedNotification: ((notification: ObservableNotification, subscriber: Subscriber) => void) | null;\n\n /**\n * The promise constructor used by default for {@link Observable#toPromise toPromise} and {@link Observable#forEach forEach}\n * methods.\n *\n * @deprecated As of version 8, RxJS will no longer support this sort of injection of a\n * Promise constructor. If you need a Promise implementation other than native promises,\n * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n */\n Promise?: PromiseConstructorLike;\n\n /**\n * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n * call in a try/catch block. It also enables producer interference, a nasty bug\n * where a multicast can be broken for all observers by a downstream consumer with\n * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n * FOR MIGRATION REASONS.\n *\n * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n * behaviors described above. Will be removed in v8.\n */\n useDeprecatedSynchronousErrorHandling: boolean;\n\n /**\n * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n * `unsubscribe()` via `this` context in `next` functions created in observers passed\n * to `subscribe`.\n *\n * This is being removed because the performance was severely problematic, and it could also cause\n * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n * their `this` context overwritten.\n *\n * @deprecated As of version 8, RxJS will no longer support altering the\n * context of next functions provided as part of an observer to Subscribe. Instead,\n * you will have access to a subscription or a signal or token that will allow you to do things like\n * unsubscribe and test closed status. 
Will be removed in v8.\n */\n useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n delegate:\n | {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n }\n | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setTimeout(handler: () => void, timeout?: number, ...args) {\n const { delegate } = timeoutProvider;\n if (delegate?.setTimeout) {\n return delegate.setTimeout(handler, timeout, ...args);\n }\n return setTimeout(handler, timeout, ...args);\n },\n clearTimeout(handle) {\n const { delegate } = timeoutProvider;\n return (delegate?.clearTimeout || clearTimeout)(handle as any);\n },\n delegate: undefined,\n};\n", "import { config } from '../config';\nimport { timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n timeoutProvider.setTimeout(() => {\n const { onUnhandledError } = config;\n if (onUnhandledError) {\n // Execute the user-configured error handler.\n onUnhandledError(err);\n } else {\n // Throw so it is picked up by the runtime's uncaught error mechanism.\n throw err;\n }\n });\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification(value: T) {\n return createNotification('N', value, undefined) as NextNotification;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n return {\n kind,\n value,\n error,\n };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. 
Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n const isRoot = !context;\n if (isRoot) {\n context = { errorThrown: false, error: null };\n }\n cb();\n if (isRoot) {\n const { errorThrown, error } = context!;\n context = null;\n if (errorThrown) {\n throw error;\n }\n }\n } else {\n // This is the general non-deprecated path for everyone that\n // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n cb();\n }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n if (config.useDeprecatedSynchronousErrorHandling && context) {\n context.errorThrown = true;\n context.error = err;\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n *\n * @class Subscriber\n */\nexport class Subscriber extends Subscription implements Observer {\n /**\n * A static factory for a Subscriber, given a (potentially partial) definition\n * of an Observer.\n * @param next The `next` callback of an Observer.\n * @param error The `error` callback of an\n * Observer.\n * @param complete The `complete` callback of an\n * Observer.\n * @return A Subscriber wrapping the (partially defined)\n * Observer represented by the given arguments.\n * @nocollapse\n * @deprecated Do not use. Will be removed in v8. There is no replacement for this\n * method, and there is no reason to be creating instances of `Subscriber` directly.\n * If you have a specific use case, please file an issue.\n */\n static create(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber {\n return new SafeSubscriber(next, error, complete);\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected isStopped: boolean = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected destination: Subscriber | Observer; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * There is no reason to directly create an instance of Subscriber. 
This type is exported for typings reasons.\n */\n constructor(destination?: Subscriber | Observer) {\n super();\n if (destination) {\n this.destination = destination;\n // Automatically chain subscriptions together here.\n // if destination is a Subscription, then it is a Subscriber.\n if (isSubscription(destination)) {\n destination.add(this);\n }\n } else {\n this.destination = EMPTY_OBSERVER;\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `next` from\n * the Observable, with a value. The Observable may call this method 0 or more\n * times.\n * @param {T} [value] The `next` value.\n * @return {void}\n */\n next(value?: T): void {\n if (this.isStopped) {\n handleStoppedNotification(nextNotification(value), this);\n } else {\n this._next(value!);\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `error` from\n * the Observable, with an attached `Error`. Notifies the Observer that\n * the Observable has experienced an error condition.\n * @param {any} [err] The `error` exception.\n * @return {void}\n */\n error(err?: any): void {\n if (this.isStopped) {\n handleStoppedNotification(errorNotification(err), this);\n } else {\n this.isStopped = true;\n this._error(err);\n }\n }\n\n /**\n * The {@link Observer} callback to receive a valueless notification of type\n * `complete` from the Observable. Notifies the Observer that the Observable\n * has finished sending push-based notifications.\n * @return {void}\n */\n complete(): void {\n if (this.isStopped) {\n handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n } else {\n this.isStopped = true;\n this._complete();\n }\n }\n\n unsubscribe(): void {\n if (!this.closed) {\n this.isStopped = true;\n super.unsubscribe();\n this.destination = null!;\n }\n }\n\n protected _next(value: T): void {\n this.destination.next(value);\n }\n\n protected _error(err: any): void {\n try {\n this.destination.error(err);\n } finally {\n this.unsubscribe();\n }\n }\n\n protected _complete(): void {\n try {\n this.destination.complete();\n } finally {\n this.unsubscribe();\n }\n }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. 
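As the `next`/`error`/`complete` methods above show, a Subscriber that has errored, completed, or unsubscribed is "stopped" and ignores further notifications (unless `onStoppedNotification` is configured). A small illustrative sketch, not part of the bundled sources:

```ts
import { Observable } from 'rxjs';

const source$ = new Observable<number>((subscriber) => {
  subscriber.next(1);
  subscriber.complete();
  subscriber.next(2); // the subscriber is already stopped; this is a no-op
});

source$.subscribe({
  next: (v) => console.log('next', v),
  complete: () => console.log('complete'),
});
// Logs: next 1, complete
```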
In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind any>(fn: Fn, thisArg: any): Fn {\n return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver implements Observer {\n constructor(private partialObserver: Partial>) {}\n\n next(value: T): void {\n const { partialObserver } = this;\n if (partialObserver.next) {\n try {\n partialObserver.next(value);\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n\n error(err: any): void {\n const { partialObserver } = this;\n if (partialObserver.error) {\n try {\n partialObserver.error(err);\n } catch (error) {\n handleUnhandledError(error);\n }\n } else {\n handleUnhandledError(err);\n }\n }\n\n complete(): void {\n const { partialObserver } = this;\n if (partialObserver.complete) {\n try {\n partialObserver.complete();\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n}\n\nexport class SafeSubscriber extends Subscriber {\n constructor(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((e?: any) => void) | null,\n complete?: (() => void) | null\n ) {\n super();\n\n let partialObserver: Partial>;\n if (isFunction(observerOrNext) || !observerOrNext) {\n // The first argument is a function, not an observer. The next\n // two arguments *could* be observers, or they could be empty.\n partialObserver = {\n next: (observerOrNext ?? undefined) as (((value: T) => void) | undefined),\n error: error ?? undefined,\n complete: complete ?? undefined,\n };\n } else {\n // The first argument is a partial observer.\n let context: any;\n if (this && config.useDeprecatedNextContext) {\n // This is a deprecated path that made `this.unsubscribe()` available in\n // next handler functions passed to subscribe. This only exists behind a flag\n // now, as it is *very* slow.\n context = Object.create(observerOrNext);\n context.unsubscribe = () => this.unsubscribe();\n partialObserver = {\n next: observerOrNext.next && bind(observerOrNext.next, context),\n error: observerOrNext.error && bind(observerOrNext.error, context),\n complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n };\n } else {\n // The \"normal\" path. 
Just use the partial observer directly.\n partialObserver = observerOrNext;\n }\n }\n\n // Wrap the partial observer to ensure it's a full observer, and\n // make sure proper error handling is accounted for.\n this.destination = new ConsumerObserver(partialObserver);\n }\n}\n\nfunction handleUnhandledError(error: any) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n captureError(error);\n } else {\n // Ideal path, we report this as an unhandled error,\n // which is thrown on a new call stack.\n reportUnhandledError(error);\n }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * do the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent\n * @param subscriber The stopped subscriber\n */\nfunction handleStoppedNotification(notification: ObservableNotification, subscriber: Subscriber) {\n const { onStoppedNotification } = config;\n onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly> & { closed: true } = {\n closed: true,\n next: noop,\n error: defaultErrorHandler,\n complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. Simply put,\n * this is like `(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n * map(i => range(i)),\n * mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? 
take(5) : identity);\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity(x: T): T {\n return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe(fn1: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction, fn3: UnaryFunction): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction,\n ...fns: UnaryFunction[]\n): UnaryFunction;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on. \n */\nexport function pipe(...fns: Array>): UnaryFunction {\n return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray(fns: Array>): UnaryFunction {\n if (fns.length === 0) {\n return identity as UnaryFunction;\n }\n\n if (fns.length === 1) {\n return fns[0];\n }\n\n return function piped(input: T): R {\n return fns.reduce((prev: any, fn: UnaryFunction) => fn(prev), input as any);\n };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n *\n * @class Observable\n */\nexport class Observable implements Subscribable {\n /**\n * @deprecated Internal implementation detail, do not use directly. 
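The standalone `pipe()` utility above composes unary functions left to right, and `pipeFromArray` is the internal helper that `Observable.prototype.pipe` reuses. A hedged sketch of composing two ordinary public operators into one reusable operator with the exported `pipe` (the operator names are public RxJS operators, not part of the sources above):

```ts
import { of, pipe, filter, map } from 'rxjs';

// Compose once, reuse anywhere a single operator is expected.
const doubleEvens = pipe(
  filter((n: number) => n % 2 === 0),
  map((n) => n * 2)
);

of(1, 2, 3, 4).pipe(doubleEvens).subscribe(console.log); // 4, 8
```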
Will be made internal in v8.\n */\n source: Observable | undefined;\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n operator: Operator | undefined;\n\n /**\n * @constructor\n * @param {Function} subscribe the function that is called when the Observable is\n * initially subscribed to. This function is given a Subscriber, to which new values\n * can be `next`ed, or an `error` method can be called to raise an error, or\n * `complete` can be called to notify of a successful completion.\n */\n constructor(subscribe?: (this: Observable, subscriber: Subscriber) => TeardownLogic) {\n if (subscribe) {\n this._subscribe = subscribe;\n }\n }\n\n // HACK: Since TypeScript inherits static properties too, we have to\n // fight against TypeScript here so Subject can have a different static create signature\n /**\n * Creates a new Observable by calling the Observable constructor\n * @owner Observable\n * @method create\n * @param {Function} subscribe? the subscriber function to be passed to the Observable constructor\n * @return {Observable} a new observable\n * @nocollapse\n * @deprecated Use `new Observable()` instead. Will be removed in v8.\n */\n static create: (...args: any[]) => any = (subscribe?: (subscriber: Subscriber) => TeardownLogic) => {\n return new Observable(subscribe);\n };\n\n /**\n * Creates a new Observable, with this Observable instance as the source, and the passed\n * operator defined as the new observable's operator.\n * @method lift\n * @param operator the operator defining the operation to take on the observable\n * @return a new observable with the Operator applied\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * If you have implemented an operator using `lift`, it is recommended that you create an\n * operator by simply returning `new Observable()` directly. See \"Creating new operators from\n * scratch\" section here: https://rxjs.dev/guide/operators\n */\n lift(operator?: Operator): Observable {\n const observable = new Observable();\n observable.source = this;\n observable.operator = operator;\n return observable;\n }\n\n subscribe(observerOrNext?: Partial> | ((value: T) => void)): Subscription;\n /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\n subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n /**\n * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n *\n * Use it when you have all these Observables, but still nothing is happening.\n *\n * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n * might be for example a function that you passed to Observable's constructor, but most of the time it is\n * a library implementation, which defines what will be emitted by an Observable, and when it be will emitted. This means\n * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as it is often\n * the thought.\n *\n * Apart from starting the execution of an Observable, this method allows you to listen for values\n * that an Observable emits, as well as for when it completes or errors. 
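The constructor documented above receives a subscriber function, and whatever that function returns is used as teardown logic. A minimal sketch, assuming nothing beyond the constructor contract described above:

```ts
import { Observable } from 'rxjs';

const every500ms$ = new Observable<number>((subscriber) => {
  let i = 0;
  const id = setInterval(() => subscriber.next(i++), 500);
  // The returned teardown runs when the subscription is unsubscribed
  // (or when the observable errors/completes).
  return () => clearInterval(id);
});

const sub = every500ms$.subscribe((v) => console.log(v));
setTimeout(() => sub.unsubscribe(), 2000); // logs a few values, then stops
```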
You can achieve this in two\n * of the following ways.\n *\n * The first way is creating an object that implements {@link Observer} interface. It should have methods\n * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). In particular, do\n * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n * an `error` method to avoid missing thrown errors.\n *\n * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n * since `subscribe` recognizes these functions by where they were placed in function call. When it comes\n * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n *\n * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n * and you also handled emissions internally by using operators (e.g. using `tap`).\n *\n * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n * up all resources that an Observable used. Note that cancelling a subscription will not call `complete` callback\n * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n *\n * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n * It is an Observable itself that decides when these functions will be called. For example {@link of}\n * by default emits all its values synchronously. 
Always check documentation for how given Observable\n * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n *\n * #### Examples\n *\n * Subscribe with an {@link guide/observer Observer}\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * const sumObserver = {\n * sum: 0,\n * next(value) {\n * console.log('Adding: ' + value);\n * this.sum = this.sum + value;\n * },\n * error() {\n * // We actually could just remove this method,\n * // since we do not really care about errors right now.\n * },\n * complete() {\n * console.log('Sum equals: ' + this.sum);\n * }\n * };\n *\n * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n * .subscribe(sumObserver);\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n *\n * ```ts\n * import { of } from 'rxjs'\n *\n * let sum = 0;\n *\n * of(1, 2, 3).subscribe(\n * value => {\n * console.log('Adding: ' + value);\n * sum = sum + value;\n * },\n * undefined,\n * () => console.log('Sum equals: ' + sum)\n * );\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Cancel a subscription\n *\n * ```ts\n * import { interval } from 'rxjs';\n *\n * const subscription = interval(1000).subscribe({\n * next(num) {\n * console.log(num)\n * },\n * complete() {\n * // Will not be called, even when cancelling subscription.\n * console.log('completed!');\n * }\n * });\n *\n * setTimeout(() => {\n * subscription.unsubscribe();\n * console.log('unsubscribed!');\n * }, 2500);\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 'unsubscribed!' after 2.5s\n * ```\n *\n * @param {Observer|Function} observerOrNext (optional) Either an observer with methods to be called,\n * or the first of three possible handlers, which is the handler for each value emitted from the subscribed\n * Observable.\n * @param {Function} error (optional) A handler for a terminal event resulting from an error. If no error handler is provided,\n * the error will be thrown asynchronously as unhandled.\n * @param {Function} complete (optional) A handler for a terminal event resulting from successful completion.\n * @return {Subscription} a subscription reference to the registered handlers\n * @method subscribe\n */\n subscribe(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((error: any) => void) | null,\n complete?: (() => void) | null\n ): Subscription {\n const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n errorContext(() => {\n const { operator, source } = this;\n subscriber.add(\n operator\n ? // We're dealing with a subscription in the\n // operator chain to one of our lifted operators.\n operator.call(subscriber, source)\n : source\n ? // If `source` has a value, but `operator` does not, something that\n // had intimate knowledge of our API, like our `Subject`, must have\n // set it. 
We're going to just call `_subscribe` directly.\n this._subscribe(subscriber)\n : // In all other cases, we're likely wrapping a user-provided initializer\n // function, so we need to catch errors and handle them appropriately.\n this._trySubscribe(subscriber)\n );\n });\n\n return subscriber;\n }\n\n /** @internal */\n protected _trySubscribe(sink: Subscriber): TeardownLogic {\n try {\n return this._subscribe(sink);\n } catch (err) {\n // We don't need to return anything in this case,\n // because it's just going to try to `add()` to a subscription\n // above.\n sink.error(err);\n }\n }\n\n /**\n * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * #### Example\n *\n * ```ts\n * import { interval, take } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(4));\n *\n * async function getTotal() {\n * let total = 0;\n *\n * await source$.forEach(value => {\n * total += value;\n * console.log('observable -> ' + value);\n * });\n *\n * return total;\n * }\n *\n * getTotal().then(\n * total => console.log('Total: ' + total)\n * );\n *\n * // Expected:\n * // 'observable -> 0'\n * // 'observable -> 1'\n * // 'observable -> 2'\n * // 'observable -> 3'\n * // 'Total: 6'\n * ```\n *\n * @param next a handler for each value emitted by the observable\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n */\n forEach(next: (value: T) => void): Promise;\n\n /**\n * @param next a handler for each value emitted by the observable\n * @param promiseCtor a constructor function used to instantiate the Promise\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n * @deprecated Passing a Promise constructor will no longer be available\n * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n * little benefit. If you need this functionality, it is recommended that you either\n * polyfill Promise, or you create an adapter to convert the returned native promise\n * to whatever promise implementation you wanted. 
Will be removed in v8.\n */\n forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise;\n\n forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n const subscriber = new SafeSubscriber({\n next: (value) => {\n try {\n next(value);\n } catch (err) {\n reject(err);\n subscriber.unsubscribe();\n }\n },\n error: reject,\n complete: resolve,\n });\n this.subscribe(subscriber);\n }) as Promise;\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): TeardownLogic {\n return this.source?.subscribe(subscriber);\n }\n\n /**\n * An interop point defined by the es7-observable spec https://github.com/zenparsing/es-observable\n * @method Symbol.observable\n * @return {Observable} this instance of the observable\n */\n [Symbol_observable]() {\n return this;\n }\n\n /* tslint:disable:max-line-length */\n pipe(): Observable;\n pipe(op1: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction, op3: OperatorFunction): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction,\n ...operations: OperatorFunction[]\n ): Observable;\n /* tslint:enable:max-line-length */\n\n /**\n * Used to stitch together functional operators into a chain.\n * @method pipe\n * @return {Observable} the Observable result of all of the operators having\n * been called in the order they were passed in.\n *\n * ## Example\n *\n * ```ts\n * import { interval, filter, map, scan } from 'rxjs';\n *\n * interval(1000)\n * .pipe(\n * filter(x => x % 2 === 0),\n * map(x => x + x),\n * scan((acc, x) => acc + x)\n * )\n * .subscribe(x => console.log(x));\n * ```\n */\n pipe(...operations: OperatorFunction[]): Observable {\n return pipeFromArray(operations)(this);\n }\n\n /* tslint:disable:max-line-length */\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. 
Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: typeof Promise): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: PromiseConstructorLike): Promise;\n /* tslint:enable:max-line-length */\n\n /**\n * Subscribe to this Observable and get a Promise resolving on\n * `complete` with the last emission (if any).\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * @method toPromise\n * @param [promiseCtor] a constructor function used to instantiate\n * the Promise\n * @return A Promise that resolves with the last value emit, or\n * rejects on an error. If there were no emissions, Promise\n * resolves with undefined.\n * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n */\n toPromise(promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n let value: T | undefined;\n this.subscribe(\n (x: T) => (value = x),\n (err: any) => reject(err),\n () => resolve(value)\n );\n }) as Promise;\n }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * A default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n return promiseCtor ?? config.Promise ?? Promise;\n}\n\nfunction isObserver(value: any): value is Observer {\n return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber(value: any): value is Subscriber {\n return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType['lift'] } {\n return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. 
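The `toPromise` deprecation notes above point to `firstValueFrom` and `lastValueFrom` as replacements. A short sketch of both, assuming RxJS v7 where they are exported from the main entry point:

```ts
import { of, firstValueFrom, lastValueFrom } from 'rxjs';

async function demo() {
  const first = await firstValueFrom(of(1, 2, 3)); // resolves with 1 on the first emission
  const last = await lastValueFrom(of(1, 2, 3));   // resolves with 3 when the source completes
  console.log(first, last); // 1 3
}

demo();
```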
Used to define operators throughout the library in a concise way.\n * @param init The logic to connect the liftedSource to the subscriber at the moment of subscription.\n */\nexport function operate(\n init: (liftedSource: Observable, subscriber: Subscriber) => (() => void) | void\n): OperatorFunction {\n return (source: Observable) => {\n if (hasLift(source)) {\n return source.lift(function (this: Subscriber, liftedSource: Observable) {\n try {\n return init(liftedSource, this);\n } catch (err) {\n this.error(err);\n }\n });\n }\n throw new TypeError('Unable to lift unknown Observable type');\n };\n}\n", "import { Subscriber } from '../Subscriber';\n\n/**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional teardown logic here. This will only be called on teardown if the\n * subscriber itself is not already closed. This is called after all other teardown logic is executed.\n */\nexport function createOperatorSubscriber(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n onFinalize?: () => void\n): Subscriber {\n return new OperatorSubscriber(destination, onNext, onComplete, onError, onFinalize);\n}\n\n/**\n * A generic helper for allowing operators to be created with a Subscriber and\n * use closures to capture necessary state from the operator function itself.\n */\nexport class OperatorSubscriber extends Subscriber {\n /**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional finalization logic here. This will only be called on finalization if the\n * subscriber itself is not already closed. This is called after all other finalization logic is executed.\n * @param shouldUnsubscribe An optional check to see if an unsubscribe call should truly unsubscribe.\n * NOTE: This currently **ONLY** exists to support the strange behavior of {@link groupBy}, where unsubscription\n * to the resulting observable does not actually disconnect from the source if there are active subscriptions\n * to any grouped observable. 
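A sketch only of how the `operate` helper above is used to define an operator: `operate` and `createOperatorSubscriber` are internal helpers shown in these sources, not public API, and the relative import paths below are assumptions about this source tree's layout.

```ts
// Assumed relative paths within the RxJS source tree shown above.
import { operate } from '../util/lift';
import { createOperatorSubscriber } from './OperatorSubscriber';

// A hypothetical `double` operator in the style the sources above describe:
// connect the lifted source to the downstream subscriber at subscription time.
export function double() {
  return operate<number, number>((source, subscriber) => {
    source.subscribe(
      createOperatorSubscriber(subscriber, (value) => subscriber.next(value * 2))
    );
  });
}
```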
(DO NOT EXPOSE OR USE EXTERNALLY!!!)\n */\n constructor(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n private onFinalize?: () => void,\n private shouldUnsubscribe?: () => boolean\n ) {\n // It's important - for performance reasons - that all of this class's\n // members are initialized and that they are always initialized in the same\n // order. This will ensure that all OperatorSubscriber instances have the\n // same hidden class in V8. This, in turn, will help keep the number of\n // hidden classes involved in property accesses within the base class as\n // low as possible. If the number of hidden classes involved exceeds four,\n // the property accesses will become megamorphic and performance penalties\n // will be incurred - i.e. inline caches won't be used.\n //\n // The reasons for ensuring all instances have the same hidden class are\n // further discussed in this blog post from Benedikt Meurer:\n // https://benediktmeurer.de/2018/03/23/impact-of-polymorphism-on-component-based-frameworks-like-react/\n super(destination);\n this._next = onNext\n ? function (this: OperatorSubscriber, value: T) {\n try {\n onNext(value);\n } catch (err) {\n destination.error(err);\n }\n }\n : super._next;\n this._error = onError\n ? function (this: OperatorSubscriber, err: any) {\n try {\n onError(err);\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._error;\n this._complete = onComplete\n ? function (this: OperatorSubscriber) {\n try {\n onComplete();\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._complete;\n }\n\n unsubscribe() {\n if (!this.shouldUnsubscribe || this.shouldUnsubscribe()) {\n const { closed } = this;\n super.unsubscribe();\n // Execute additional teardown if we have any and we didn't already do so.\n !closed && this.onFinalize?.();\n }\n }\n}\n", "import { Subscription } from '../Subscription';\n\ninterface AnimationFrameProvider {\n schedule(callback: FrameRequestCallback): Subscription;\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n delegate:\n | {\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n }\n | undefined;\n}\n\nexport const animationFrameProvider: AnimationFrameProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n schedule(callback) {\n let request = requestAnimationFrame;\n let cancel: typeof cancelAnimationFrame | undefined = cancelAnimationFrame;\n const { delegate } = animationFrameProvider;\n if (delegate) {\n request = delegate.requestAnimationFrame;\n cancel = delegate.cancelAnimationFrame;\n }\n const handle = request((timestamp) => {\n // Clear the cancel function. 
The request has been fulfilled, so\n // attempting to cancel the request upon unsubscription would be\n // pointless.\n cancel = undefined;\n callback(timestamp);\n });\n return new Subscription(() => cancel?.(handle));\n },\n requestAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.requestAnimationFrame || requestAnimationFrame)(...args);\n },\n cancelAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.cancelAnimationFrame || cancelAnimationFrame)(...args);\n },\n delegate: undefined,\n};\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface ObjectUnsubscribedError extends Error {}\n\nexport interface ObjectUnsubscribedErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (): ObjectUnsubscribedError;\n}\n\n/**\n * An error thrown when an action is invalid because the object has been\n * unsubscribed.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n *\n * @class ObjectUnsubscribedError\n */\nexport const ObjectUnsubscribedError: ObjectUnsubscribedErrorCtor = createErrorClass(\n (_super) =>\n function ObjectUnsubscribedErrorImpl(this: any) {\n _super(this);\n this.name = 'ObjectUnsubscribedError';\n this.message = 'object unsubscribed';\n }\n);\n", "import { Operator } from './Operator';\nimport { Observable } from './Observable';\nimport { Subscriber } from './Subscriber';\nimport { Subscription, EMPTY_SUBSCRIPTION } from './Subscription';\nimport { Observer, SubscriptionLike, TeardownLogic } from './types';\nimport { ObjectUnsubscribedError } from './util/ObjectUnsubscribedError';\nimport { arrRemove } from './util/arrRemove';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A Subject is a special type of Observable that allows values to be\n * multicasted to many Observers. Subjects are like EventEmitters.\n *\n * Every Subject is an Observable and an Observer. You can subscribe to a\n * Subject, and you can call next to feed values as well as error and complete.\n */\nexport class Subject extends Observable implements SubscriptionLike {\n closed = false;\n\n private currentObservers: Observer[] | null = null;\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n observers: Observer[] = [];\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n isStopped = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n hasError = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n thrownError: any = null;\n\n /**\n * Creates a \"subject\" by basically gluing an observer to an observable.\n *\n * @nocollapse\n * @deprecated Recommended you do not use. Will be removed at some point in the future. Plans for replacement still under discussion.\n */\n static create: (...args: any[]) => any = (destination: Observer, source: Observable): AnonymousSubject => {\n return new AnonymousSubject(destination, source);\n };\n\n constructor() {\n // NOTE: This must be here to obscure Observable's constructor.\n super();\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
*/\n lift(operator: Operator): Observable {\n const subject = new AnonymousSubject(this, this);\n subject.operator = operator as any;\n return subject as any;\n }\n\n /** @internal */\n protected _throwIfClosed() {\n if (this.closed) {\n throw new ObjectUnsubscribedError();\n }\n }\n\n next(value: T) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n if (!this.currentObservers) {\n this.currentObservers = Array.from(this.observers);\n }\n for (const observer of this.currentObservers) {\n observer.next(value);\n }\n }\n });\n }\n\n error(err: any) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.hasError = this.isStopped = true;\n this.thrownError = err;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.error(err);\n }\n }\n });\n }\n\n complete() {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.isStopped = true;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.complete();\n }\n }\n });\n }\n\n unsubscribe() {\n this.isStopped = this.closed = true;\n this.observers = this.currentObservers = null!;\n }\n\n get observed() {\n return this.observers?.length > 0;\n }\n\n /** @internal */\n protected _trySubscribe(subscriber: Subscriber): TeardownLogic {\n this._throwIfClosed();\n return super._trySubscribe(subscriber);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._checkFinalizedStatuses(subscriber);\n return this._innerSubscribe(subscriber);\n }\n\n /** @internal */\n protected _innerSubscribe(subscriber: Subscriber) {\n const { hasError, isStopped, observers } = this;\n if (hasError || isStopped) {\n return EMPTY_SUBSCRIPTION;\n }\n this.currentObservers = null;\n observers.push(subscriber);\n return new Subscription(() => {\n this.currentObservers = null;\n arrRemove(observers, subscriber);\n });\n }\n\n /** @internal */\n protected _checkFinalizedStatuses(subscriber: Subscriber) {\n const { hasError, thrownError, isStopped } = this;\n if (hasError) {\n subscriber.error(thrownError);\n } else if (isStopped) {\n subscriber.complete();\n }\n }\n\n /**\n * Creates a new Observable with this Subject as the source. You can do this\n * to create custom Observer-side logic of the Subject and conceal it from\n * code that uses the Observable.\n * @return {Observable} Observable that the Subject casts to\n */\n asObservable(): Observable {\n const observable: any = new Observable();\n observable.source = this;\n return observable;\n }\n}\n\n/**\n * @class AnonymousSubject\n */\nexport class AnonymousSubject extends Subject {\n constructor(\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n public destination?: Observer,\n source?: Observable\n ) {\n super();\n this.source = source;\n }\n\n next(value: T) {\n this.destination?.next?.(value);\n }\n\n error(err: any) {\n this.destination?.error?.(err);\n }\n\n complete() {\n this.destination?.complete?.();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n return this.source?.subscribe(subscriber) ?? 
EMPTY_SUBSCRIPTION;\n }\n}\n", "import { Subject } from './Subject';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\n\n/**\n * A variant of Subject that requires an initial value and emits its current\n * value whenever it is subscribed to.\n *\n * @class BehaviorSubject\n */\nexport class BehaviorSubject extends Subject {\n constructor(private _value: T) {\n super();\n }\n\n get value(): T {\n return this.getValue();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n const subscription = super._subscribe(subscriber);\n !subscription.closed && subscriber.next(this._value);\n return subscription;\n }\n\n getValue(): T {\n const { hasError, thrownError, _value } = this;\n if (hasError) {\n throw thrownError;\n }\n this._throwIfClosed();\n return _value;\n }\n\n next(value: T): void {\n super.next((this._value = value));\n }\n}\n", "import { TimestampProvider } from '../types';\n\ninterface DateTimestampProvider extends TimestampProvider {\n delegate: TimestampProvider | undefined;\n}\n\nexport const dateTimestampProvider: DateTimestampProvider = {\n now() {\n // Use the variable rather than `this` so that the function can be called\n // without being bound to the provider.\n return (dateTimestampProvider.delegate || Date).now();\n },\n delegate: undefined,\n};\n", "import { Subject } from './Subject';\nimport { TimestampProvider } from './types';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * A variant of {@link Subject} that \"replays\" old values to new subscribers by emitting them when they first subscribe.\n *\n * `ReplaySubject` has an internal buffer that will store a specified number of values that it has observed. Like `Subject`,\n * `ReplaySubject` \"observes\" values by having them passed to its `next` method. When it observes a value, it will store that\n * value for a time determined by the configuration of the `ReplaySubject`, as passed to its constructor.\n *\n * When a new subscriber subscribes to the `ReplaySubject` instance, it will synchronously emit all values in its buffer in\n * a First-In-First-Out (FIFO) manner. The `ReplaySubject` will also complete, if it has observed completion; and it will\n * error if it has observed an error.\n *\n * There are two main configuration items to be concerned with:\n *\n * 1. `bufferSize` - This will determine how many items are stored in the buffer, defaults to infinite.\n * 2. `windowTime` - The amount of time to hold a value in the buffer before removing it from the buffer.\n *\n * Both configurations may exist simultaneously. So if you would like to buffer a maximum of 3 values, as long as the values\n * are less than 2 seconds old, you could do so with a `new ReplaySubject(3, 2000)`.\n *\n * ### Differences with BehaviorSubject\n *\n * `BehaviorSubject` is similar to `new ReplaySubject(1)`, with a couple of exceptions:\n *\n * 1. `BehaviorSubject` comes \"primed\" with a single value upon construction.\n * 2. 
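`BehaviorSubject` above stores its latest value in `_value` and re-emits it to each new subscriber before forwarding further notifications. A quick sketch:

```ts
import { BehaviorSubject } from 'rxjs';

const state$ = new BehaviorSubject<number>(0);

state$.subscribe((v) => console.log('A', v)); // A 0  (current value delivered immediately)
state$.next(1);                                // A 1
state$.subscribe((v) => console.log('B', v)); // B 1  (new subscriber gets the latest value)
console.log(state$.value);                     // 1
```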
`ReplaySubject` will replay values, even after observing an error, where `BehaviorSubject` will not.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n * @see {@link shareReplay}\n */\nexport class ReplaySubject extends Subject {\n private _buffer: (T | number)[] = [];\n private _infiniteTimeWindow = true;\n\n /**\n * @param bufferSize The size of the buffer to replay on subscription\n * @param windowTime The amount of time the buffered items will stay buffered\n * @param timestampProvider An object with a `now()` method that provides the current timestamp. This is used to\n * calculate the amount of time something has been buffered.\n */\n constructor(\n private _bufferSize = Infinity,\n private _windowTime = Infinity,\n private _timestampProvider: TimestampProvider = dateTimestampProvider\n ) {\n super();\n this._infiniteTimeWindow = _windowTime === Infinity;\n this._bufferSize = Math.max(1, _bufferSize);\n this._windowTime = Math.max(1, _windowTime);\n }\n\n next(value: T): void {\n const { isStopped, _buffer, _infiniteTimeWindow, _timestampProvider, _windowTime } = this;\n if (!isStopped) {\n _buffer.push(value);\n !_infiniteTimeWindow && _buffer.push(_timestampProvider.now() + _windowTime);\n }\n this._trimBuffer();\n super.next(value);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._trimBuffer();\n\n const subscription = this._innerSubscribe(subscriber);\n\n const { _infiniteTimeWindow, _buffer } = this;\n // We use a copy here, so reentrant code does not mutate our array while we're\n // emitting it to a new subscriber.\n const copy = _buffer.slice();\n for (let i = 0; i < copy.length && !subscriber.closed; i += _infiniteTimeWindow ? 1 : 2) {\n subscriber.next(copy[i] as T);\n }\n\n this._checkFinalizedStatuses(subscriber);\n\n return subscription;\n }\n\n private _trimBuffer() {\n const { _bufferSize, _timestampProvider, _buffer, _infiniteTimeWindow } = this;\n // If we don't have an infinite buffer size, and we're over the length,\n // use splice to truncate the old buffer values off. Note that we have to\n // double the size for instances where we're not using an infinite time window\n // because we're storing the values and the timestamps in the same array.\n const adjustedBufferSize = (_infiniteTimeWindow ? 1 : 2) * _bufferSize;\n _bufferSize < Infinity && adjustedBufferSize < _buffer.length && _buffer.splice(0, _buffer.length - adjustedBufferSize);\n\n // Now, if we're not in an infinite time window, remove all values where the time is\n // older than what is allowed.\n if (!_infiniteTimeWindow) {\n const now = _timestampProvider.now();\n let last = 0;\n // Search the array for the first timestamp that isn't expired and\n // truncate the buffer up to that point.\n for (let i = 1; i < _buffer.length && (_buffer[i] as number) <= now; i += 2) {\n last = i;\n }\n last && _buffer.splice(0, last + 1);\n }\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Subscription } from '../Subscription';\nimport { SchedulerAction } from '../types';\n\n/**\n * A unit of work to be executed in a `scheduler`. 
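The buffer and trim logic above is what makes `new ReplaySubject(bufferSize, windowTime)` replay recent values to late subscribers. For example, with a buffer of 2 and no time window:

```ts
import { ReplaySubject } from 'rxjs';

const recent$ = new ReplaySubject<number>(2); // keep at most the last 2 values

recent$.next(1);
recent$.next(2);
recent$.next(3);

recent$.subscribe((v) => console.log(v)); // 2, 3 replayed synchronously on subscribe
```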
An action is typically\n * created from within a {@link SchedulerLike} and an RxJS user does not need to concern\n * themselves about creating and manipulating an Action.\n *\n * ```ts\n * class Action extends Subscription {\n * new (scheduler: Scheduler, work: (state?: T) => void);\n * schedule(state?: T, delay: number = 0): Subscription;\n * }\n * ```\n *\n * @class Action\n */\nexport class Action extends Subscription {\n constructor(scheduler: Scheduler, work: (this: SchedulerAction, state?: T) => void) {\n super();\n }\n /**\n * Schedules this action on its parent {@link SchedulerLike} for execution. May be passed\n * some context object, `state`. May happen at some point in the future,\n * according to the `delay` parameter, if specified.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler.\n * @return {void}\n */\n public schedule(state?: T, delay: number = 0): Subscription {\n return this;\n }\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetIntervalFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearIntervalFunction = (handle: TimerHandle) => void;\n\ninterface IntervalProvider {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n delegate:\n | {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n }\n | undefined;\n}\n\nexport const intervalProvider: IntervalProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setInterval(handler: () => void, timeout?: number, ...args) {\n const { delegate } = intervalProvider;\n if (delegate?.setInterval) {\n return delegate.setInterval(handler, timeout, ...args);\n }\n return setInterval(handler, timeout, ...args);\n },\n clearInterval(handle) {\n const { delegate } = intervalProvider;\n return (delegate?.clearInterval || clearInterval)(handle as any);\n },\n delegate: undefined,\n};\n", "import { Action } from './Action';\nimport { SchedulerAction } from '../types';\nimport { Subscription } from '../Subscription';\nimport { AsyncScheduler } from './AsyncScheduler';\nimport { intervalProvider } from './intervalProvider';\nimport { arrRemove } from '../util/arrRemove';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncAction extends Action {\n public id: TimerHandle | undefined;\n public state?: T;\n // @ts-ignore: Property has no initializer and is not definitely assigned\n public delay: number;\n protected pending: boolean = false;\n\n constructor(protected scheduler: AsyncScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (this.closed) {\n return this;\n }\n\n // Always replace the current state with the new state.\n this.state = state;\n\n const id = this.id;\n const scheduler = this.scheduler;\n\n //\n // Important implementation note:\n //\n // Actions only execute once by default, unless rescheduled from within the\n // scheduled callback. 
This allows us to implement single and repeat\n // actions via the same code path, without adding API surface area, as well\n // as mimic traditional recursion but across asynchronous boundaries.\n //\n // However, JS runtimes and timers distinguish between intervals achieved by\n // serial `setTimeout` calls vs. a single `setInterval` call. An interval of\n // serial `setTimeout` calls can be individually delayed, which delays\n // scheduling the next `setTimeout`, and so on. `setInterval` attempts to\n // guarantee the interval callback will be invoked more precisely to the\n // interval period, regardless of load.\n //\n // Therefore, we use `setInterval` to schedule single and repeat actions.\n // If the action reschedules itself with the same delay, the interval is not\n // canceled. If the action doesn't reschedule, or reschedules with a\n // different delay, the interval will be canceled after scheduled callback\n // execution.\n //\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, delay);\n }\n\n // Set the pending flag indicating that this action has been scheduled, or\n // has recursively rescheduled itself.\n this.pending = true;\n\n this.delay = delay;\n // If this action has already an async Id, don't request a new one.\n this.id = this.id ?? this.requestAsyncId(scheduler, this.id, delay);\n\n return this;\n }\n\n protected requestAsyncId(scheduler: AsyncScheduler, _id?: TimerHandle, delay: number = 0): TimerHandle {\n return intervalProvider.setInterval(scheduler.flush.bind(scheduler, this), delay);\n }\n\n protected recycleAsyncId(_scheduler: AsyncScheduler, id?: TimerHandle, delay: number | null = 0): TimerHandle | undefined {\n // If this action is rescheduled with the same delay time, don't clear the interval id.\n if (delay != null && this.delay === delay && this.pending === false) {\n return id;\n }\n // Otherwise, if the action's delay time is different from the current delay,\n // or the action has been rescheduled before it's executed, clear the interval id\n if (id != null) {\n intervalProvider.clearInterval(id);\n }\n\n return undefined;\n }\n\n /**\n * Immediately executes this action and the `work` it contains.\n * @return {any}\n */\n public execute(state: T, delay: number): any {\n if (this.closed) {\n return new Error('executing a cancelled action');\n }\n\n this.pending = false;\n const error = this._execute(state, delay);\n if (error) {\n return error;\n } else if (this.pending === false && this.id != null) {\n // Dequeue if the action didn't reschedule itself. Don't call\n // unsubscribe(), because the action could reschedule later.\n // For example:\n // ```\n // scheduler.schedule(function doWork(counter) {\n // /* ... I'm a busy worker bee ... */\n // var originalAction = this;\n // /* wait 100ms before rescheduling the action */\n // setTimeout(function () {\n // originalAction.schedule(counter + 1);\n // }, 100);\n // }, 1000);\n // ```\n this.id = this.recycleAsyncId(this.scheduler, this.id, null);\n }\n }\n\n protected _execute(state: T, _delay: number): any {\n let errored: boolean = false;\n let errorValue: any;\n try {\n this.work(state);\n } catch (e) {\n errored = true;\n // HACK: Since code elsewhere is relying on the \"truthiness\" of the\n // return here, we can't have it return \"\" or 0 or false.\n // TODO: Clean this up when we refactor schedulers mid-version-8 or so.\n errorValue = e ? 
e : new Error('Scheduled action threw falsy error');\n }\n if (errored) {\n this.unsubscribe();\n return errorValue;\n }\n }\n\n unsubscribe() {\n if (!this.closed) {\n const { id, scheduler } = this;\n const { actions } = scheduler;\n\n this.work = this.state = this.scheduler = null!;\n this.pending = false;\n\n arrRemove(actions, this);\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, null);\n }\n\n this.delay = null!;\n super.unsubscribe();\n }\n }\n}\n", "import { Action } from './scheduler/Action';\nimport { Subscription } from './Subscription';\nimport { SchedulerLike, SchedulerAction } from './types';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * An execution context and a data structure to order tasks and schedule their\n * execution. Provides a notion of (potentially virtual) time, through the\n * `now()` getter method.\n *\n * Each unit of work in a Scheduler is called an `Action`.\n *\n * ```ts\n * class Scheduler {\n * now(): number;\n * schedule(work, delay?, state?): Subscription;\n * }\n * ```\n *\n * @class Scheduler\n * @deprecated Scheduler is an internal implementation detail of RxJS, and\n * should not be used directly. Rather, create your own class and implement\n * {@link SchedulerLike}. Will be made internal in v8.\n */\nexport class Scheduler implements SchedulerLike {\n public static now: () => number = dateTimestampProvider.now;\n\n constructor(private schedulerActionCtor: typeof Action, now: () => number = Scheduler.now) {\n this.now = now;\n }\n\n /**\n * A getter method that returns a number representing the current time\n * (at the time this function was called) according to the scheduler's own\n * internal clock.\n * @return {number} A number that represents the current time. May or may not\n * have a relation to wall-clock time. May or may not refer to a time unit\n * (e.g. milliseconds).\n */\n public now: () => number;\n\n /**\n * Schedules a function, `work`, for execution. May happen at some point in\n * the future, according to the `delay` parameter, if specified. 
May be passed\n * some context object, `state`, which will be passed to the `work` function.\n *\n * The given arguments will be processed an stored as an Action object in a\n * queue of actions.\n *\n * @param {function(state: ?T): ?Subscription} work A function representing a\n * task, or some unit of work to be executed by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler itself.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @return {Subscription} A subscription in order to be able to unsubscribe\n * the scheduled work.\n */\n public schedule(work: (this: SchedulerAction, state?: T) => void, delay: number = 0, state?: T): Subscription {\n return new this.schedulerActionCtor(this, work).schedule(state, delay);\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Action } from './Action';\nimport { AsyncAction } from './AsyncAction';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncScheduler extends Scheduler {\n public actions: Array> = [];\n /**\n * A flag to indicate whether the Scheduler is currently executing a batch of\n * queued actions.\n * @type {boolean}\n * @internal\n */\n public _active: boolean = false;\n /**\n * An internal ID used to track the latest asynchronous task such as those\n * coming from `setTimeout`, `setInterval`, `requestAnimationFrame`, and\n * others.\n * @type {any}\n * @internal\n */\n public _scheduled: TimerHandle | undefined;\n\n constructor(SchedulerAction: typeof Action, now: () => number = Scheduler.now) {\n super(SchedulerAction, now);\n }\n\n public flush(action: AsyncAction): void {\n const { actions } = this;\n\n if (this._active) {\n actions.push(action);\n return;\n }\n\n let error: any;\n this._active = true;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions.shift()!)); // exhaust the scheduler queue\n\n this._active = false;\n\n if (error) {\n while ((action = actions.shift()!)) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\n/**\n *\n * Async Scheduler\n *\n * Schedule task as if you used setTimeout(task, duration)\n *\n * `async` scheduler schedules tasks asynchronously, by putting them on the JavaScript\n * event loop queue. 
It is best used to delay tasks in time or to schedule tasks repeating\n * in intervals.\n *\n * If you just want to \"defer\" task, that is to perform it right after currently\n * executing synchronous code ends (commonly achieved by `setTimeout(deferredTask, 0)`),\n * better choice will be the {@link asapScheduler} scheduler.\n *\n * ## Examples\n * Use async scheduler to delay task\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * const task = () => console.log('it works!');\n *\n * asyncScheduler.schedule(task, 2000);\n *\n * // After 2 seconds logs:\n * // \"it works!\"\n * ```\n *\n * Use async scheduler to repeat task in intervals\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * function task(state) {\n * console.log(state);\n * this.schedule(state + 1, 1000); // `this` references currently executing Action,\n * // which we reschedule with new state and delay\n * }\n *\n * asyncScheduler.schedule(task, 3000, 0);\n *\n * // Logs:\n * // 0 after 3s\n * // 1 after 4s\n * // 2 after 5s\n * // 3 after 6s\n * ```\n */\n\nexport const asyncScheduler = new AsyncScheduler(AsyncAction);\n\n/**\n * @deprecated Renamed to {@link asyncScheduler}. Will be removed in v8.\n */\nexport const async = asyncScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { Subscription } from '../Subscription';\nimport { QueueScheduler } from './QueueScheduler';\nimport { SchedulerAction } from '../types';\nimport { TimerHandle } from './timerHandle';\n\nexport class QueueAction extends AsyncAction {\n constructor(protected scheduler: QueueScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (delay > 0) {\n return super.schedule(state, delay);\n }\n this.delay = delay;\n this.state = state;\n this.scheduler.flush(this);\n return this;\n }\n\n public execute(state: T, delay: number): any {\n return delay > 0 || this.closed ? super.execute(state, delay) : this._execute(state, delay);\n }\n\n protected requestAsyncId(scheduler: QueueScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n\n if ((delay != null && delay > 0) || (delay == null && this.delay > 0)) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n\n // Otherwise flush the scheduler starting with this action.\n scheduler.flush(this);\n\n // HACK: In the past, this was returning `void`. However, `void` isn't a valid\n // `TimerHandle`, and generally the return value here isn't really used. So the\n // compromise is to return `0` which is both \"falsy\" and a valid `TimerHandle`,\n // as opposed to refactoring every other instanceo of `requestAsyncId`.\n return 0;\n }\n}\n", "import { AsyncScheduler } from './AsyncScheduler';\n\nexport class QueueScheduler extends AsyncScheduler {\n}\n", "import { QueueAction } from './QueueAction';\nimport { QueueScheduler } from './QueueScheduler';\n\n/**\n *\n * Queue Scheduler\n *\n * Put every next task on a queue, instead of executing it immediately\n *\n * `queue` scheduler, when used with delay, behaves the same as {@link asyncScheduler} scheduler.\n *\n * When used without delay, it schedules given task synchronously - executes it right when\n * it is scheduled. 
However when called recursively, that is when inside the scheduled task,\n * another task is scheduled with queue scheduler, instead of executing immediately as well,\n * that task will be put on a queue and wait for current one to finish.\n *\n * This means that when you execute task with `queue` scheduler, you are sure it will end\n * before any other task scheduled with that scheduler will start.\n *\n * ## Examples\n * Schedule recursively first, then do something\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(() => {\n * queueScheduler.schedule(() => console.log('second')); // will not happen now, but will be put on a queue\n *\n * console.log('first');\n * });\n *\n * // Logs:\n * // \"first\"\n * // \"second\"\n * ```\n *\n * Reschedule itself recursively\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(function(state) {\n * if (state !== 0) {\n * console.log('before', state);\n * this.schedule(state - 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * console.log('after', state);\n * }\n * }, 0, 3);\n *\n * // In scheduler that runs recursively, you would expect:\n * // \"before\", 3\n * // \"before\", 2\n * // \"before\", 1\n * // \"after\", 1\n * // \"after\", 2\n * // \"after\", 3\n *\n * // But with queue it logs:\n * // \"before\", 3\n * // \"after\", 3\n * // \"before\", 2\n * // \"after\", 2\n * // \"before\", 1\n * // \"after\", 1\n * ```\n */\n\nexport const queueScheduler = new QueueScheduler(QueueAction);\n\n/**\n * @deprecated Renamed to {@link queueScheduler}. Will be removed in v8.\n */\nexport const queue = queueScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\nimport { SchedulerAction } from '../types';\nimport { animationFrameProvider } from './animationFrameProvider';\nimport { TimerHandle } from './timerHandle';\n\nexport class AnimationFrameAction extends AsyncAction {\n constructor(protected scheduler: AnimationFrameScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n protected requestAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay is greater than 0, request as an async action.\n if (delay !== null && delay > 0) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n // Push the action to the end of the scheduler queue.\n scheduler.actions.push(this);\n // If an animation frame has already been requested, don't request another\n // one. If an animation frame hasn't been requested yet, request one. Return\n // the current animation frame request id.\n return scheduler._scheduled || (scheduler._scheduled = animationFrameProvider.requestAnimationFrame(() => scheduler.flush(undefined)));\n }\n\n protected recycleAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle | undefined {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n if (delay != null ? 
delay > 0 : this.delay > 0) {\n return super.recycleAsyncId(scheduler, id, delay);\n }\n // If the scheduler queue has no remaining actions with the same async id,\n // cancel the requested animation frame and set the scheduled flag to\n // undefined so the next AnimationFrameAction will request its own.\n const { actions } = scheduler;\n if (id != null && actions[actions.length - 1]?.id !== id) {\n animationFrameProvider.cancelAnimationFrame(id as number);\n scheduler._scheduled = undefined;\n }\n // Return undefined so the action knows to request a new async id if it's rescheduled.\n return undefined;\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\nexport class AnimationFrameScheduler extends AsyncScheduler {\n public flush(action?: AsyncAction): void {\n this._active = true;\n // The async id that effects a call to flush is stored in _scheduled.\n // Before executing an action, it's necessary to check the action's async\n // id to determine whether it's supposed to be executed in the current\n // flush.\n // Previous implementations of this method used a count to determine this,\n // but that was unsound, as actions that are unsubscribed - i.e. cancelled -\n // are removed from the actions array and that can shift actions that are\n // scheduled to be executed in a subsequent flush into positions at which\n // they are executed within the current flush.\n const flushId = this._scheduled;\n this._scheduled = undefined;\n\n const { actions } = this;\n let error: any;\n action = action || actions.shift()!;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions[0]) && action.id === flushId && actions.shift());\n\n this._active = false;\n\n if (error) {\n while ((action = actions[0]) && action.id === flushId && actions.shift()) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AnimationFrameAction } from './AnimationFrameAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\n\n/**\n *\n * Animation Frame Scheduler\n *\n * Perform task when `window.requestAnimationFrame` would fire\n *\n * When `animationFrame` scheduler is used with delay, it will fall back to {@link asyncScheduler} scheduler\n * behaviour.\n *\n * Without delay, `animationFrame` scheduler can be used to create smooth browser animations.\n * It makes sure scheduled task will happen just before next browser content repaint,\n * thus performing animations as efficiently as possible.\n *\n * ## Example\n * Schedule div height animation\n * ```ts\n * // html:
\n * import { animationFrameScheduler } from 'rxjs';\n *\n * const div = document.querySelector('div');\n *\n * animationFrameScheduler.schedule(function(height) {\n * div.style.height = height + \"px\";\n *\n * this.schedule(height + 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * }, 0, 0);\n *\n * // You will see a div element growing in height\n * ```\n */\n\nexport const animationFrameScheduler = new AnimationFrameScheduler(AnimationFrameAction);\n\n/**\n * @deprecated Renamed to {@link animationFrameScheduler}. Will be removed in v8.\n */\nexport const animationFrame = animationFrameScheduler;\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\n\n/**\n * A simple Observable that emits no items to the Observer and immediately\n * emits a complete notification.\n *\n * Just emits 'complete', and nothing else.\n *\n * ![](empty.png)\n *\n * A simple Observable that only emits the complete notification. It can be used\n * for composing with other Observables, such as in a {@link mergeMap}.\n *\n * ## Examples\n *\n * Log complete notification\n *\n * ```ts\n * import { EMPTY } from 'rxjs';\n *\n * EMPTY.subscribe({\n * next: () => console.log('Next'),\n * complete: () => console.log('Complete!')\n * });\n *\n * // Outputs\n * // Complete!\n * ```\n *\n * Emit the number 7, then complete\n *\n * ```ts\n * import { EMPTY, startWith } from 'rxjs';\n *\n * const result = EMPTY.pipe(startWith(7));\n * result.subscribe(x => console.log(x));\n *\n * // Outputs\n * // 7\n * ```\n *\n * Map and flatten only odd numbers to the sequence `'a'`, `'b'`, `'c'`\n *\n * ```ts\n * import { interval, mergeMap, of, EMPTY } from 'rxjs';\n *\n * const interval$ = interval(1000);\n * const result = interval$.pipe(\n * mergeMap(x => x % 2 === 1 ? of('a', 'b', 'c') : EMPTY),\n * );\n * result.subscribe(x => console.log(x));\n *\n * // Results in the following to the console:\n * // x is equal to the count on the interval, e.g. (0, 1, 2, 3, ...)\n * // x will occur every 1000ms\n * // if x % 2 is equal to 1, print a, b, c (each on its own)\n * // if x % 2 is not equal to 1, nothing will be output\n * ```\n *\n * @see {@link Observable}\n * @see {@link NEVER}\n * @see {@link of}\n * @see {@link throwError}\n */\nexport const EMPTY = new Observable((subscriber) => subscriber.complete());\n\n/**\n * @param scheduler A {@link SchedulerLike} to use for scheduling\n * the emission of the complete notification.\n * @deprecated Replaced with the {@link EMPTY} constant or {@link scheduled} (e.g. `scheduled([], scheduler)`). Will be removed in v8.\n */\nexport function empty(scheduler?: SchedulerLike) {\n return scheduler ? emptyScheduled(scheduler) : EMPTY;\n}\n\nfunction emptyScheduled(scheduler: SchedulerLike) {\n return new Observable((subscriber) => scheduler.schedule(() => subscriber.complete()));\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport function isScheduler(value: any): value is SchedulerLike {\n return value && isFunction(value.schedule);\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\nimport { isScheduler } from './isScheduler';\n\nfunction last(arr: T[]): T | undefined {\n return arr[arr.length - 1];\n}\n\nexport function popResultSelector(args: any[]): ((...args: unknown[]) => unknown) | undefined {\n return isFunction(last(args)) ? 
args.pop() : undefined;\n}\n\nexport function popScheduler(args: any[]): SchedulerLike | undefined {\n return isScheduler(last(args)) ? args.pop() : undefined;\n}\n\nexport function popNumber(args: any[], defaultValue: number): number {\n return typeof last(args) === 'number' ? args.pop()! : defaultValue;\n}\n", "export const isArrayLike = ((x: any): x is ArrayLike => x && typeof x.length === 'number' && typeof x !== 'function');", "import { isFunction } from \"./isFunction\";\n\n/**\n * Tests to see if the object is \"thennable\".\n * @param value the object to test\n */\nexport function isPromise(value: any): value is PromiseLike {\n return isFunction(value?.then);\n}\n", "import { InteropObservable } from '../types';\nimport { observable as Symbol_observable } from '../symbol/observable';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being Observable (but not necessary an Rx Observable) */\nexport function isInteropObservable(input: any): input is InteropObservable {\n return isFunction(input[Symbol_observable]);\n}\n", "import { isFunction } from './isFunction';\n\nexport function isAsyncIterable(obj: any): obj is AsyncIterable {\n return Symbol.asyncIterator && isFunction(obj?.[Symbol.asyncIterator]);\n}\n", "/**\n * Creates the TypeError to throw if an invalid object is passed to `from` or `scheduled`.\n * @param input The object that was passed.\n */\nexport function createInvalidObservableTypeError(input: any) {\n // TODO: We should create error codes that can be looked up, so this can be less verbose.\n return new TypeError(\n `You provided ${\n input !== null && typeof input === 'object' ? 'an invalid object' : `'${input}'`\n } where a stream was expected. You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.`\n );\n}\n", "export function getSymbolIterator(): symbol {\n if (typeof Symbol !== 'function' || !Symbol.iterator) {\n return '@@iterator' as any;\n }\n\n return Symbol.iterator;\n}\n\nexport const iterator = getSymbolIterator();\n", "import { iterator as Symbol_iterator } from '../symbol/iterator';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being an Iterable */\nexport function isIterable(input: any): input is Iterable {\n return isFunction(input?.[Symbol_iterator]);\n}\n", "import { ReadableStreamLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport async function* readableStreamLikeToAsyncGenerator(readableStream: ReadableStreamLike): AsyncGenerator {\n const reader = readableStream.getReader();\n try {\n while (true) {\n const { value, done } = await reader.read();\n if (done) {\n return;\n }\n yield value!;\n }\n } finally {\n reader.releaseLock();\n }\n}\n\nexport function isReadableStreamLike(obj: any): obj is ReadableStreamLike {\n // We don't want to use instanceof checks because they would return\n // false for instances from another Realm, like an + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/examples/sweeps/index.html b/examples/sweeps/index.html new file mode 100644 index 00000000..02f2df52 --- /dev/null +++ b/examples/sweeps/index.html @@ -0,0 +1,2677 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Running sweeps - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Hyper-Parameter Optimization#

+
+

Work-in-progress!

+

Please note that this is very much a work in progress!

+
+

This is a small example of running a hyper-parameter optimization (HPO) sweep. Hydra and submitit make it very easy to launch lots of jobs on SLURM clusters.

+


+

Hyper-Parameter Optimization with the Orion Hydra Sweeper#

+

Here is a configuration file that you can use to launch a hyper-parameter optimization (HPO) sweep:

+
+Click to show the yaml config file +
# @package _global_
+defaults:
+  - example.yaml # A configuration for a single run (that works!)
+  - override /hydra/sweeper: orion # Select the orion sweeper plugin
+
+log_level: DEBUG
+name: "local-sweep-example"
+seed: 123
+
+algorithm:
+  optimizer:
+    # This here will get overwritten by the sweeper.
+    lr: 0.002
+
+trainer:
+  accelerator: auto
+  devices: 1
+  max_epochs: 1
+  logger:
+    wandb:
+      _target_: lightning.pytorch.loggers.wandb.WandbLogger
+      project: "ResearchTemplate"
+      # TODO: Use the Orion trial name?
+      # name: ${oc.env:SLURM_JOB_ID}_${oc.env:SLURM_ARRAY_TASK_ID,0}_${oc.env:SLURM_PROCID}
+      save_dir: "${hydra:runtime.output_dir}"
+      offline: False # set True to store all logs only locally
+      # id: ${oc.env:SLURM_JOB_ID}_${oc.env:SLURM_ARRAY_TASK_ID,0}_${oc.env:SLURM_PROCID} # pass correct id to resume experiment!
+      # entity: ""  # set to name of your wandb team
+      log_model: False
+      prefix: ""
+      job_type: "train"
+      group: ["${name}"]
+      tags: ["${name}"]
+
+hydra:
+  mode: MULTIRUN
+  run:
+    # output directory, generated dynamically on each run
+    dir: logs/${name}/runs
+  sweep:
+    dir: logs/${name}/multiruns/
+    # subdir: ${hydra.job.num}
+    subdir: ${hydra.job.id}/task${hydra.job.num}
+
+  sweeper:
+    params:
+      algorithm:
+        optimizer:
+          lr: "loguniform(1e-6, 1.0, default_value=3e-4)"
+          # weight_decay: "loguniform(1e-6, 1e-2, default_value=0)"
+
+    experiment:
+      name: "${name}"
+      version: 1
+
+    algorithm:
+      type: tpe
+      config:
+        seed: 1
+
+    worker:
+      n_workers: 1
+      max_broken: 10000
+      max_trials: 10
+
+    storage:
+      type: legacy
+      use_hydra_path: false
+      database:
+        type: pickleddb
+        host: "logs/${name}/multiruns/database.pkl"
+    parametrization: null
+
+
+

You can use it like so:

+
python project/main.py experiment=local_sweep_example
+
+

Hyper-Parameter Optimization on a SLURM cluster#

+
+Click to show the yaml config file +
# @package _global_
+
+# This is an "experiment" config, that groups together other configs into a ready-to-run example.
+
+defaults:
+  - example.yaml # A configuration for a single run (that works!)
+  - override /trainer/logger: wandb
+  - override /hydra/sweeper: orion
+  - override /resources: gpu
+  - override /cluster: ??? # use `current` if you are already on a cluster, otherwise use one of the `cluster` configs.
+
+log_level: DEBUG
+name: "sweep-example"
+
+# Set the seed to be the SLURM_PROCID, so that if we run more than one task per GPU, we get
+# TODO: This should technically be something like the "run_id", which would be different than SLURM_PROCID when using >1 gpus per "run".
+seed: ${oc.env:SLURM_PROCID,123}
+
+algorithm:
+  optimizer:
+    # This here will get overwritten by the sweeper.
+    lr: 0.002
+
+trainer:
+  accelerator: gpu
+  devices: 1
+  max_epochs: 1
+  logger:
+    wandb:
+      project: "ResearchTemplate"
+      # TODO: Use the Orion trial name?
+      name: ${oc.env:SLURM_JOB_ID}_${oc.env:SLURM_ARRAY_TASK_ID,0}_${oc.env:SLURM_PROCID}
+      save_dir: "${hydra:runtime.output_dir}"
+      offline: False # set True to store all logs only locally
+      id: ${oc.env:SLURM_JOB_ID}_${oc.env:SLURM_ARRAY_TASK_ID,0}_${oc.env:SLURM_PROCID} # pass correct id to resume experiment!
+      # entity: ""  # set to name of your wandb team
+      log_model: False
+      prefix: ""
+      job_type: "train"
+      group: ${oc.env:SLURM_JOB_ID}
+      # tags: ["${name}"]
+
+hydra:
+  mode: MULTIRUN
+  # TODO: Make it so running the same command twice in the same job id resumes from the last checkpoint.
+  run:
+    # output directory, generated dynamically on each run
+    dir: logs/${name}/runs
+  sweep:
+    dir: logs/${name}/multiruns/
+    # subdir: ${hydra.job.num}
+    subdir: ${hydra.job.id}/task${oc.env:SLURM_PROCID,0}
+
+  launcher:
+    # todo: bump this up.
+    array_parallelism: 5 # max num of jobs to run in parallel
+    additional_parameters:
+      time: 0-00:10:00 # maximum wall time allocated for the job (D-HH:MM:SS)
+      # TODO: Pack more than one job on a single GPU, and support this with both a
+      # patched submitit launcher as well as our remote submitit launcher, as well as by patching the
+      # orion sweeper to not drop these other results.
+      # ntasks_per_gpu: 1
+  sweeper:
+    params:
+      algorithm:
+        optimizer:
+          lr: "loguniform(1e-6, 1.0, default_value=3e-4)"
+          # weight_decay: "loguniform(1e-6, 1e-2, default_value=0)"
+      # todo: setup a fidelity parameter. Seems to not be working right now.
+      # trainer:
+      #   # Let the HPO algorithm allocate more epochs to more promising HP configurations.
+      #   max_epochs: "fidelity(1, 10, default_value=1)"
+
+    parametrization: null
+    experiment:
+      name: "${name}"
+      version: 1
+
+    algorithm:
+      #  BUG: Getting a weird bug with TPE: KeyError in `dum_below_trials = [...]` at line 397.
+      type: tpe
+      config:
+        seed: 1
+
+    worker:
+      n_workers: ${hydra.launcher.array_parallelism}
+      max_broken: 10000
+      max_trials: 10
+
+    storage:
+      type: legacy
+      use_hydra_path: false
+      database:
+        type: pickleddb
+        host: "logs/${name}/multiruns/database.pkl"
+
+
+

Here's how you can easily launch a sweep remotely on the Mila cluster. If you are already on a SLURM cluster, use the "cluster=current" config.

+
python project/main.py experiment=cluster_sweep_example cluster=mila
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/examples/text_classification/index.html b/examples/text_classification/index.html new file mode 100644 index 00000000..c121a513 --- /dev/null +++ b/examples/text_classification/index.html @@ -0,0 +1,2703 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Text Classification (🤗+⚡) - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Text Classification (⚡ + 🤗)#

+

Overview#

+

The TextClassifier is a LightningModule for a simple text classification task.

+

It accepts a TextClassificationDataModule as input, along with a network.

+
+Click to show the code of the lightningmodule +
class TextClassifier(LightningModule):
+    """Example of a lightning module used to train a huggingface model for text classification."""
+
+    def __init__(
+        self,
+        datamodule: TextClassificationDataModule,
+        network: HydraConfigFor[PreTrainedModel],
+        hf_metric_name: str,
+        learning_rate: float = 2e-5,
+        adam_epsilon: float = 1e-8,
+        warmup_steps: int = 0,
+        weight_decay: float = 0.0,
+        init_seed: int = 42,
+    ):
+        super().__init__()
+        self.network_config = network
+        self.num_labels = datamodule.num_classes
+        self.task_name = datamodule.task_name
+        self.init_seed = init_seed
+        self.hf_metric_name = hf_metric_name
+        self.learning_rate = learning_rate
+        self.adam_epsilon = adam_epsilon
+        self.warmup_steps = warmup_steps
+        self.weight_decay = weight_decay
+
+        self.metric = evaluate.load(
+            self.hf_metric_name,
+            self.task_name,
+            # todo: replace with hydra job id perhaps?
+            experiment_id=datetime.now().strftime("%d-%m-%Y_%H-%M-%S"),
+        )
+
+        self.save_hyperparameters(ignore=["datamodule"])
+
+    def configure_model(self) -> None:
+        with torch.random.fork_rng(devices=[self.device]):
+            # deterministic weight initialization
+            torch.manual_seed(self.init_seed)
+            self.network = hydra_zen.instantiate(self.network_config)
+
+        return super().configure_model()
+
+    def forward(self, inputs: dict[str, torch.Tensor]) -> BaseModelOutput:
+        return self.network(**inputs)
+
+    def shared_step(self, batch: dict[str, torch.Tensor], batch_idx: int, stage: str):
+        outputs: CausalLMOutput | SequenceClassifierOutput = self(batch)
+        loss = outputs.loss
+        assert isinstance(loss, torch.Tensor), loss
+        # todo: log the output of the metric.
+        self.log(f"{stage}/loss", loss, prog_bar=True)
+        if isinstance(outputs, SequenceClassifierOutput):
+            metric_value = self.metric.compute(
+                # logits=outputs.logits,
+                predictions=outputs.logits.argmax(-1),
+                references=batch["labels"],
+            )
+            assert isinstance(metric_value, dict)
+            for k, v in metric_value.items():
+                self.log(
+                    f"{stage}/{k}",
+                    v,
+                    prog_bar=True,
+                )
+        return loss
+
+    def training_step(self, batch: dict[str, torch.Tensor], batch_idx: int):
+        return self.shared_step(batch, batch_idx, "train")
+
+    def validation_step(
+        self, batch: dict[str, torch.Tensor], batch_idx: int, dataloader_idx: int = 0
+    ):
+        return self.shared_step(batch, batch_idx, "val")
+
+    def configure_optimizers(self):
+        """Prepare optimizer and schedule (linear warmup and decay)"""
+        model = self.network
+        no_decay = ["bias", "LayerNorm.weight"]
+        optimizer_grouped_parameters = [
+            {
+                "params": [
+                    p
+                    for n, p in model.named_parameters()
+                    if not any(nd_param in n for nd_param in no_decay)
+                ],
+                "weight_decay": self.weight_decay,
+            },
+            {
+                "params": [
+                    p
+                    for n, p in model.named_parameters()
+                    if any(nd_param in n for nd_param in no_decay)
+                ],
+                "weight_decay": 0.0,
+            },
+        ]
+        optimizer = AdamW(
+            optimizer_grouped_parameters,
+            lr=self.learning_rate,
+            eps=self.adam_epsilon,
+        )
+
+        scheduler = get_linear_schedule_with_warmup(
+            optimizer,
+            num_warmup_steps=self.warmup_steps,
+            num_training_steps=self.trainer.estimated_stepping_batches,
+        )
+        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
+        return [optimizer], [scheduler]
+
+
+
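The network argument above is not an instantiated model: it is a Hydra config for one, and it only gets turned into an actual transformers model inside configure_model(). As a rough sketch (not taken from the template's source; the values are the illustrative ones from the algorithm config below), that instantiation amounts to:

import hydra_zen

# Illustrative network config, mirroring the algorithm config shown further down:
network_config = {
    "_target_": "transformers.models.auto.modeling_auto.AutoModelForSequenceClassification.from_pretrained",
    "pretrained_model_name_or_path": "albert-base-v2",
}
# Inside configure_model(), after seeding for deterministic initialization,
# the LightningModule does (roughly) this:
network = hydra_zen.instantiate(network_config)  # downloads and builds the HF model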

Config files#

+

Algorithm config#

+
+Click to show the Algorithm config +

Source: project/configs/algorithm/text_classifier.yaml

+
# Config for the Text classification example algorithm
+_target_: project.algorithms.text_classifier.TextClassifier
+_recursive_: false
+network:
+  _target_: transformers.models.auto.modeling_auto.AutoModelForSequenceClassification.from_pretrained
+  pretrained_model_name_or_path: albert-base-v2
+
+# NOTE: Why _partial_? Because the config doesn't create the algo directly, it creates a function
+# that will accept the datamodule and network and return the algo.
+_partial_: true
+hf_metric_name: glue
+
+
+
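To illustrate the note about _partial_ above, here is a minimal sketch (not part of the template, and assuming the project package is importable) of what _partial_: true changes when Hydra instantiates this config:

from hydra.utils import instantiate
from omegaconf import OmegaConf

# Trimmed-down, illustrative version of the algorithm config above:
cfg = OmegaConf.create(
    {
        "_target_": "project.algorithms.text_classifier.TextClassifier",
        "_partial_": True,
        "hf_metric_name": "glue",
    }
)

# With _partial_: true, instantiate() returns functools.partial(TextClassifier, hf_metric_name="glue")
# instead of a TextClassifier instance:
algorithm_fn = instantiate(cfg)
# The entry-point can then pass the remaining arguments later, for example:
# algorithm = algorithm_fn(datamodule=datamodule, network=network_config)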

Datamodule config#

+
+Click to show the Datamodule config +

Source: project/configs/datamodule/glue_cola.yaml

+
_target_: project.datamodules.text.TextClassificationDataModule
+data_dir: ${oc.env:SCRATCH,.}/data
+hf_dataset_path: glue
+task_name: cola
+text_fields:
+  - "sentence"
+tokenizer:
+  _target_: transformers.models.auto.tokenization_auto.AutoTokenizer.from_pretrained
+  use_fast: true
+  # Note: We could interpolate this value with `${/algorithm/network/pretrained_model_name_or_path}`
+  # to avoid duplicating a value, but this also makes it harder to use this by itself or with
+  # another algorithm.
+  pretrained_model_name_or_path: albert-base-v2
+  cache_dir: ${..data_dir}
+  trust_remote_code: true
+num_classes: 2
+max_seq_length: 128
+train_batch_size: 32
+eval_batch_size: 32
+
+
+

Running the example#

+

Here is a configuration file that you can use to launch a simple experiment:

+
+Click to show the yaml config file +

Source: project/configs/experiment/text_classification_example.yaml

+
# @package _global_
+defaults:
+  - override /algorithm: text_classifier
+  - override /datamodule: glue_cola
+  - override /trainer/callbacks: none
+
+trainer:
+  min_epochs: 1
+  max_epochs: 2
+  limit_train_batches: 2
+  limit_val_batches: 1
+  num_sanity_val_steps: 0
+  enable_checkpointing: False
+
+
+

You can use it like so:

+
python project/main.py experiment=text_classification_example
+
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/extra.css b/extra.css new file mode 100644 index 00000000..74db49f5 --- /dev/null +++ b/extra.css @@ -0,0 +1,3 @@ +.md-grid { + /* max-width: 100%; */ +} diff --git a/features/auto_schema/index.html b/features/auto_schema/index.html new file mode 100644 index 00000000..7fbae4b2 --- /dev/null +++ b/features/auto_schema/index.html @@ -0,0 +1,2455 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Magic Config Schemas - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Auto Schema for Hydra Configs#

+
+

🔥 NOTE: This is a feature that is entirely unique to this template! 🔥

+
+

This project template comes with a really neat feature: Your Hydra config files automatically get a Schema associated with them.

+

This greatly improves the experience of developing a project with Hydra:

+
    +
  • Saves you time by preventing errors caused by unexpected keys in your config files, or values that are of the wrong type. This can often happen after moving files or renaming a function, for example.
  • +
  • While writing a config file you get to see:
      +
    • the list of available configuration options in a given config
    • +
    • the default value for each field
    • +
    • the documentation for each field (taken from the source code of the function!)
    • +
    +
  • +
+

Here's a quick demo of what this looks like in practice:

+

+

Here we have a config that will be used to configure the lightning.Trainer class, but any config file in the project will also get a schema automatically, even if it doesn't have a "_target_" key directly!

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/features/index.html b/features/index.html new file mode 100644 index 00000000..1090255d --- /dev/null +++ b/features/index.html @@ -0,0 +1,2442 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Features 🔥 - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Features unique to this project template#

+

Here are some cool features that are unique to this particular template:

+
    +
  • Support for both Jax and Torch with PyTorch-Lightning (See the Jax example)
  • +
  • Your Hydra configs will have auto-generated YAML schemas 🔥
  • +
  • A comprehensive suite of automated tests for new algorithms, datasets and networks
  • +
  • Easy development inside a devcontainer with VsCode
  • +
  • Tailor-made for ML researchers who run their jobs on SLURM clusters (with default configurations for the Mila and DRAC clusters).
  • +
  • Rich typing of all parts of the source code
  • +
+

This template is aimed at ML researchers who run their jobs on SLURM clusters. The target audience is researchers and students at Mila. This template should still be useful for others outside of Mila who use PyTorch-Lightning and Hydra.

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/features/jax/index.html b/features/jax/index.html new file mode 100644 index 00000000..fbaa24ee --- /dev/null +++ b/features/jax/index.html @@ -0,0 +1,2511 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Jax and Torch support with Lightning ⚡ - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Using Jax with PyTorch-Lightning#

+
+

🔥 NOTE: This is a feature that is entirely unique to this template! 🔥

+
+

This template includes examples that use either Jax, PyTorch, or both! There's a table describing each example here.

+

You can mix and match both Jax and Torch code. For example, you can use Jax for your dataloading, your network, or the learning algorithm, all while still benefiting from the nice stuff that comes from using PyTorch-Lightning.

+
+How does this work? +

Well, we use torch-jax-interop, another package developed here at Mila 😎, which allows easy interop between Torch and Jax code. Feel free to take a look at it if you'd like to use it as part of your own project. 😁

+
+

Using PyTorch-Lightning to train a Jax network#

+

If you'd like to use Jax in your network or learning algorithm, while keeping the same style of training loop as usual, you can!

+
    +
  • Use Jax for the forward / backward passes, the parameter updates, dataset preprocessing, etc.
  • +
  • Leave the training loop / callbacks / logging / checkpointing / etc to Lightning
  • +
+

The lightning.Trainer will not be able to tell that you're using Jax!

+

Take a look at this image classification example that uses a Jax network.

+

End-to-end training in Jax: the JaxTrainer#

+

The JaxTrainer, used in the Jax RL Example, follows a similar structure to the lightning Trainer. However, instead of training LightningModules, it trains JaxModules, which are a simplified, Jax-based look-alike of lightning.LightningModules.

+

The "algorithm" needs to match the JaxModule protocol: +- JaxModule.training_step: train using a batch of data

+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/features/remote_slurm_launcher/index.html b/features/remote_slurm_launcher/index.html new file mode 100644 index 00000000..94857480 --- /dev/null +++ b/features/remote_slurm_launcher/index.html @@ -0,0 +1,2562 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Launching Jobs on Remote Clusters - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Remote Slurm Submitit Launcher#

+
+

🔥 NOTE: This is a feature that is entirely unique to this template! 🔥

+
+

This template includes a custom submitit launcher that can be used to launch jobs on remote SLURM clusters. This allows you to develop code locally and easily ship it to a different cluster. The only prerequisite is that you must have SSH access to the remote cluster.

+

Under the hood, this uses a custom remote-slurm-executor submitit plugin.

+
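For context, here is a minimal sketch of what plain submitit does on its own (this is not the custom plugin, and the parameter values are illustrative):

import submitit

executor = submitit.AutoExecutor(folder="logs/submitit")
executor.update_parameters(timeout_min=10, slurm_partition="main", gpus_per_node=1)

# Submits a function to run as a SLURM job and returns a handle to it.
job = executor.submit(print, "Hello from a SLURM job!")
print(job.job_id)

The remote-slurm-executor plugin builds on this idea so that the submission can happen over SSH, targeting a different cluster than the one you are currently on.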

This feature allows you to launch jobs on remote SLURM clusters using two config groups:

+
    +
  • The resources config group is used to select the job resources:
      +
    • cpu: CPU job
    • +
    • gpu: GPU job
    • +
    +
  • +
  • The cluster config group controls where to run the job:
      +
    • current: Run on the current cluster. Use this if you're already on a SLURM cluster (e.g. when using mila code). This uses the usual submitit_slurm launcher.
    • +
    • mila: Launches the job on the Mila cluster.
    • +
    • narval: Remotely launches the job on the Narval cluster
    • +
    • cedar: Remotely launches the job on the Cedar cluster
    • +
    • beluga: Remotely launches the job on the Beluga cluster
    • +
    +
  • +
+

Examples#

+

This assumes that you've already set up SSH access to the clusters (for example using mila init).

+

Local machine -> Mila#

+
python project/main.py experiment=example resources=gpu cluster=mila
+
+

Local machine -> DRAC (narval)#

+
python project/main.py experiment=example resources=gpu cluster=narval
+
+

Mila -> DRAC (narval)#

+

This assumes that you've already set up SSH access from Mila to the DRAC clusters.

+

Note that the command is exactly the same as above.

+
python project/main.py experiment=example resources=gpu cluster=narval
+
+
+

Warning

+

If you want to launch jobs on a remote cluster, it is (currently) necessary to place the "resources" config before the "cluster" config on the command-line.

+
+

Launching jobs on the current SLURM cluster#

+

If you develop on a SLURM cluster, you can use cluster=current, or simply omit the cluster config group and only use a config from the resources group.

+
(mila) $ python project/main.py experiment=example resources=gpu cluster=current
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/features/testing/index.html b/features/testing/index.html new file mode 100644 index 00000000..3b2487ec --- /dev/null +++ b/features/testing/index.html @@ -0,0 +1,2616 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Thorough automated testing on SLURM clusters - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Automated Testing#

+

Tests are a vital part of any good codebase, especially in Machine Learning. They make it easier to explore and try out new ideas by giving you the confidence that your codebase works as intended.

+

This template comes with some easy-to-use test suites as well as some pre-configured GitHub Actions workflows to run them:

+
    +
  • Unit tests: quick to run and check small functions / modules / classes.
  • +
  • Regression tests: check that your code is reproducible and let you know if something changed while you were developing your code.
  • +
  • Integration tests: run your code end-to-end to make sure that all the individually-tested components work together as expected.
  • +
  • GitHub Actions runs all these tests before you merge your code.
  • +
+ + +

Automated testing on SLURM clusters with GitHub CI#

+
+

🔥 NOTE: This is a feature that is entirely unique to this template! 🔥

+
+

This template automatically runs all the above-mentioned tests on an actual compute node of the Mila cluster. Assuming that you have access to the Mila / DRAC or other SLURM clusters, all you need to do is set up a local self-hosted GitHub runner for your fork of this repository and launch it on your local machine with access to a SLURM cluster. Voilà: your code will now be tested on an ACTUAL SLURM cluster whenever you push or update a PR in your project's GitHub repository.

+
+

Detailed instructions on how to set this up in your project will be added soon.

+
+

Test-suites#

+

Unit testing in this template is done with pytest.

+

To run tests, simply use pytest on the command-line. You may want to add some useful flags like pytest -x -v. See the pytest docs for more info.

+

The built-in tests cover the following:

+
    +
  • For each datamodule config, for each data split
      +
    • test that the first batch is always the same
    • +
    +
  • +
  • For each algorithm config, for all compatible network / datamodule config combinations:
      +
    • initialization is deterministic & reproducible;
    • +
    • forward pass is deterministic & reproducible;
    • +
    • backward pass is deterministic & reproducible;
    • +
    +
  • +
+

Take a look at project.algorithms.testsuites.lightning_module_tests to see the included base tests for algorithms.

+
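To give a feel for what these checks look like, here is a simplified, hypothetical version of such a determinism test (the real tests in that module are more thorough and run across many config combinations; algorithm and batch would be pytest fixtures):

import torch


def test_forward_pass_is_deterministic(algorithm: torch.nn.Module, batch: torch.Tensor):
    torch.manual_seed(123)
    output_a = algorithm(batch)
    torch.manual_seed(123)
    output_b = algorithm(batch)
    # Two seeded forward passes must produce exactly the same result.
    torch.testing.assert_close(output_a, output_b)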

If you use Visual Studio Code, you may want to look into adding the "test explorer" tab to your editor. Then, you'll be able to see and debug the tests using the GUI.

+

Unit tests#

+
pytest -x -v
+
+

Regression Tests#

+

We use pytest-regressions to test that code changes don't break things.

+
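For illustration, here is a small, hypothetical regression test using the data_regression fixture from pytest-regressions (not taken from the template's test-suite):

def test_metrics_regression(data_regression):
    # In a real test, these values would come from running part of your code with a fixed seed.
    metrics = {"accuracy": 0.92, "loss": 0.31}
    # The first run records these values to a file (see the flags below);
    # later runs fail if the recorded values change.
    data_regression.check(metrics)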
    +
  • --gen-missing: Use this flag when you might be missing some of the regression files (for example on the first test run).
  • +
  • --regen-all: Use this when you want to intentionally re-create the regression files. This should hopefully not be used often!
  • +
+

First run#

+

On the first run, you might want to run the tests with the --gen-missing flag, like so:

+
pytest --gen-missing
+
+

integration-tests#

+

To run slower integration tests, use the following:

+
pytest -x -v --slow
+
+

Continuous Integration#

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/generate_reference_docs.py b/generate_reference_docs.py new file mode 100644 index 00000000..da07a682 --- /dev/null +++ b/generate_reference_docs.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python +"""Script used to generate the reference docs for the project from the source code. + +Based on +https://github.com/mkdocstrings/mkdocstrings/blob/5802b1ef5ad9bf6077974f777bd55f32ce2bc219/docs/gen_doc_stubs.py#L25 +""" + +import textwrap +from logging import getLogger as get_logger +from pathlib import Path + +logger = get_logger(__name__) + + +def main(): + """Generate the code reference pages and navigation.""" + + import mkdocs_gen_files + + nav = mkdocs_gen_files.nav.Nav() + + root = Path(__file__).parent.parent + src = root / "project" + + for path in sorted(src.rglob("*.py")): + module_path = path.relative_to(root).with_suffix("") + doc_path = path.relative_to(root).with_suffix(".md") + full_doc_path = Path("reference", doc_path) + + parts = tuple(module_path.parts) + + if parts[-1] == "__init__": + parts = parts[:-1] + doc_path = doc_path.with_name("index.md") + full_doc_path = full_doc_path.with_name("index.md") + elif parts[-1] == "__main__": + continue + + nav[parts] = doc_path.as_posix() + + with mkdocs_gen_files.open(full_doc_path, "w") as fd: + ident = ".".join(parts) + fd.write( + textwrap.dedent( + # f"""\ + # --- + # additional_python_references: + # - {ident} + # --- + # ::: {ident} + # """ + f"""\ + ::: {ident} + """ + ) + ) + # fd.write(f"::: {ident}\n") + + mkdocs_gen_files.set_edit_path(full_doc_path, path.relative_to(root)) + + with mkdocs_gen_files.open("reference/SUMMARY.md", "w") as nav_file: + nav_file.writelines(nav.build_literate_nav()) + + +if __name__ in ["__main__", ""]: + # Run when executed directly or by mkdocs. Seems like the __name__ is during `mkdocs serve` + main() diff --git a/help/index.html b/help/index.html new file mode 100644 index 00000000..ceb30e8e --- /dev/null +++ b/help/index.html @@ -0,0 +1,2490 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Getting Help - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Help and Support#

+

FAQ#

+

How to get help#

+
    +
  • Make an Issue on GitHub
  • +
  • Reach out via Slack (if you're a researcher at Mila)
  • +
  • Reach out via email
  • +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/index.html b/index.html new file mode 100644 index 00000000..e4f62f8a --- /dev/null +++ b/index.html @@ -0,0 +1,2635 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Research Project Template#

+

Build +codecov +hydra +license

+
+

Work-in-Progress

+

Please note: This is a Work-in-Progress. The goal is to make a first release by the end of fall 2024.

+
+

This is a research project template. It is meant to be a starting point for ML researchers at Mila.

+

For more context, see this introduction to the project.

+
+ + +
+

Starting a new project#

+

To create a new project using this template, Click Here or on the green "Use this template" button on the template's GitHub repository.

+

Setting up your environment#

+

Here are two recommended ways to set up your development environment:

+
    +
  • Using the uv package manager
  • +
  • Using a development container (recommended if you are able to install Docker on your machine)
  • +
+
+
+
+
    +
  1. +

    Clone your new repo and navigate into it

    +
    git clone https://www.github.com/your-username/your-repo-name
    +cd your-repo-name
    +
    +
  2. +
  3. +

    Install the package manager

    +
    # Install uv
    +curl -LsSf https://astral.sh/uv/install.sh | sh
    +source $HOME/.cargo/env
    +
    +
  4. +
  5. +

    Install dependencies

    +
    uv sync  # Creates a virtual environment and installs dependencies in it.
    +
    +
  6. +
+
+
+
    +
  1. Install WSL following this guide
  2. +
  3. Follow the installation instructions for Linux
  4. +
+
+
+
    +
  1. +

    Clone your new repo and navigate into it

    +
    git clone https://www.github.com/your-username/your-repo-name
    +cd your-repo-name
    +
    +
  2. +
  3. +

    (Mila cluster) - Launch the setup script

    +

If you're on the Mila cluster, you can run the setup script on a compute node, just to be nice:

    +
    srun --pty --gres=gpu:1 --cpus-per-task=4 --mem=16G --time=00:10:00 scripts/mila_setup.sh
    +
    +
  4. +
+
+
+
+

Usage#

+

To see all available options:

+
uv run python project/main.py --help
+
+

For a detailed list of examples, see the examples page.

+

Developing inside a container (advanced)#

+

This repo provides a Devcontainer configuration for Visual Studio Code to use a Docker container as a pre-configured development environment. This avoids the struggle of setting up a development environment and makes environments reproducible and consistent.

+

If that sounds useful to you, we recommend you first familiarize yourself with the container tutorials. The devcontainer.json file assumes that you have a GPU locally by default. If not, you can simply comment out the "--gpus" flag in the .devcontainer/devcontainer.json file.

+
    +
  1. +

    Setup Docker on your local machine

    +

On a Linux machine where you have root access, you can install Docker using the following commands:

    +
    curl -fsSL https://get.docker.com -o get-docker.sh
    +sudo sh get-docker.sh
    +
    +

    On Windows or Mac, follow these installation instructions

    +
  2. +
  3. +

    (optional) Install the nvidia-container-toolkit to use your local machine's GPU(s).

    +
  4. +
  5. +

    Install the Dev Containers extension for Visual Studio Code.

    +
  6. +
  7. +

When opening the repository in Visual Studio Code, you should be prompted to reopen the repository in a container:

    +

    VsCode popup image

    +

    Alternatively, you can open the command palette (Ctrl+Shift+P) and select Dev Containers: Rebuild and Reopen in Container.

    +
  8. +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/intro/index.html b/intro/index.html new file mode 100644 index 00000000..3f53764f --- /dev/null +++ b/intro/index.html @@ -0,0 +1,2536 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Intro - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Why use this template?#

+

Why should you use a template in the first place?#

+

For many good reasons, which are very well described here in a similar project! 😊

+

Other good reads:

+ +

Why use this template?#

+ +

Project layout#

+
pyproject.toml   # Project metadata and dependencies
+project/
+    main.py      # main entry-point
+    algorithms/  # learning algorithms
+    datamodules/ # datasets, processing and loading
+    networks/    # Neural networks used by algorithms
+    configs/     # Hydra configuration files
+docs/            # documentation
+conftest.py      # Test fixtures and utilities
+
+

Libraries used#

+

This project makes use of the following libraries:

+
    +
  • Hydra is used to configure the project. It allows you to define configuration files and override them from the command line (a minimal sketch of such an entry-point is shown after this list).
  • +
  • PyTorch Lightning is used as the training framework. It provides a high-level interface to organize ML research code.
      +
    • 🔥 Please note: You can also use Jax with this repo, as described in the Jax example 🔥
    • +
    +
  • +
  • Weights & Biases is used to log metrics and visualize results.
  • +
  • pytest is used for testing.
  • +
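As a minimal sketch of such a Hydra entry-point (simplified, with assumed config_path and config_name values; the template's project/main.py does quite a bit more):

import hydra
from omegaconf import DictConfig


@hydra.main(config_path="configs", config_name="config", version_base=None)
def main(cfg: DictConfig) -> None:
    # Any value in the composed config can be overridden from the command line,
    # e.g. `python project/main.py algorithm.optimizer.lr=0.01`.
    print(cfg)


if __name__ == "__main__":
    main()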
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/macros.py b/macros.py new file mode 100644 index 00000000..5ca0a148 --- /dev/null +++ b/macros.py @@ -0,0 +1,81 @@ +from __future__ import annotations + +import importlib +import inspect +import logging +import textwrap +import typing +from pathlib import Path +from typing import Any + +import torch + +if typing.TYPE_CHECKING: + from mkdocs_macros.plugin import MacrosPlugin + +import lightning + +try: + from mkdocs_autoref_plugin.autoref_plugin import default_reference_sources + + default_reference_sources.extend( + [ + lightning.Trainer, + lightning.LightningModule, + lightning.LightningDataModule, + torch.nn.Module, + ] + ) +except ImportError: + pass + +logger = logging.getLogger(__name__) + + +def define_env(env: MacrosPlugin): + def inline(module_or_file: str, indent: int = 0): + block_type: str | None = None + # print(f"Inlining reference: {module_or_file}") + logger.info(f"Inlining reference: {module_or_file}") + # TODO: need to support adding the indent otherwise we can't use this inside a collapsible block. + file = Path(env.project_dir) / module_or_file + if file.exists(): + if not block_type: + if file.suffix in [".yaml", ".yml"]: + block_type = "yaml" + elif file.suffix == ".py": + block_type = "python3" + elif file.suffix == ".sh": + block_type = "bash" + else: + block_type = "" + content = file.read_text() + else: + block_type = block_type or "python3" + obj: Any = get_object_from_reference(module_or_file) + logger.info(f"inlining code for {obj}") + content = inspect.getsource(obj) + # BUG: Sometimes using {{ inline('some_module.SomeClass.some_method') }} will show the + # incorrect source code: it will show the method *above* the one we're looking for. + # content = "".join(inspect.getsourcelines(obj)[0]) + + content = f"```{block_type}\n" + textwrap.indent(content + "\n```", " " * indent) + return content + + env.macro(inline, name="inline") + + +def get_object_from_reference(reference: str): + """taken from https://github.com/mkdocs/mkdocs/issues/692""" + parts = reference.split(".") + for i in range(1, len(parts)): + module_name = ".".join(parts[:i]) + obj_path = parts[i:] + try: + obj = importlib.import_module(module_name) + for part in obj_path: + obj = getattr(obj, part) + return obj + except (ModuleNotFoundError, AttributeError): + continue + raise RuntimeError(f"Unable to import the {reference=}") diff --git a/objects.inv b/objects.inv new file mode 100644 index 00000000..72d3ec57 Binary files /dev/null and b/objects.inv differ diff --git a/profiling_test.py b/profiling_test.py new file mode 100644 index 00000000..14d02549 --- /dev/null +++ b/profiling_test.py @@ -0,0 +1,128 @@ +import shutil + +import hydra.errors +import lightning +import pytest +from omegaconf import DictConfig + +from project.conftest import ( # noqa: F401 + accelerator, + algorithm_config, + algorithm_network_config, + command_line_arguments, + command_line_overrides, + datamodule_config, + experiment_dictconfig, +) +from project.experiment import ( + instantiate_algorithm, + instantiate_datamodule, + instantiate_trainer, + setup_logging, +) +from project.utils.hydra_utils import resolve_dictconfig + + +# NTOE: could also run these commands with the `resources` group and `cluster=mila` +@pytest.mark.skipif(not shutil.which("sbatch"), reason="Needs to be run on a SLURM cluster") +@pytest.mark.parametrize( + "command_line_arguments", + [ + # Instrumenting your code -baseline + """ + experiment=profiling \ + algorithm=image_classifier \ + 
trainer.logger.wandb.name="Baseline" \ + trainer.logger.wandb.tags=["Training","Baseline comparison","CPU/GPU comparison"] + """, + # Identifying potential bottlenecks - baseline + """ + experiment=profiling\ + algorithm=no_op\ + trainer.logger.wandb.name="Baseline without training" \ + trainer.logger.wandb.tags=["No training","Baseline comparison"] + + """, + # Identifying potential bottlenecks - num_workers multirun + pytest.param( + """ + -m experiment=profiling \ + algorithm=no_op \ + trainer.logger.wandb.tags=["1 CPU Dataloading","Worker throughput"] \ + datamodule.num_workers=1,4,8,16,32 + """, + marks=pytest.mark.xfail( + reason="LexerNoViableAltException error caused by the -m flag", + raises=hydra.errors.OverrideParseException, + strict=True, + ), + ), + # Identifying potential bottlenecks - num_workers multirun + pytest.param( + """ + -m experiment=profiling \ + algorithm=no_op \ + resources=cpu \ + trainer.logger.wandb.tags=["2 CPU Dataloading","Worker throughput"] \ + hydra.launcher.timeout_min=60 \ + hydra.launcher.cpus_per_task=2 \ + hydra.launcher.constraint="sapphire" \ + datamodule.num_workers=1,4,8,16,32 + """, + marks=pytest.mark.xfail( + reason="LexerNoViableAltException error caused by the -m flag", + raises=hydra.errors.OverrideParseException, + strict=True, + ), + ), + # Identifying potential bottlenecks - fcnet mnist + """ + experiment=profiling \ + algorithm=image_classifier \ + algorithm/network=fcnet \ + datamodule=mnist \ + trainer.logger.wandb.name="FcNet/MNIST baseline with training" \ + trainer.logger.wandb.tags=["CPU/GPU comparison","GPU","MNIST"] + """, + # Throughput across GPU types + """ + experiment=profiling \ + algorithm=image_classifier \ + resources=gpu \ + hydra.launcher.gres='gpu:a100:1' \ + hydra.launcher.cpus_per_task=4 \ + datamodule.num_workers=8 \ + trainer.logger.wandb.name="A100 training" \ + trainer.logger.wandb.tags=["GPU comparison"] + """, + # Making the most out of your GPU + pytest.param( + """ + -m experiment=profiling \ + algorithm=image_classifier \ + datamodule.num_workers=8 \ + datamodule.batch_size=32,64,128,256 \ + trainer.logger.wandb.tags=["Batch size comparison"]\ + '++trainer.logger.wandb.name=Batch size ${datamodule.batch_size}' + """, + marks=pytest.mark.xfail( + reason="LexerNoViableAltException error caused by the -m flag", + raises=hydra.errors.OverrideParseException, + strict=True, + ), + ), + ], + indirect=True, +) +def test_notebook_commands_dont_cause_errors(experiment_dictconfig: DictConfig): # noqa + # check for any errors related to OmegaConf interpolations and such + config = resolve_dictconfig(experiment_dictconfig) + # check for any errors when actually instantiating the components. + # _experiment = _setup_experiment(config) + setup_logging(log_level=config.log_level) + lightning.seed_everything(config.seed, workers=True) + _trainer = instantiate_trainer(config) + datamodule = instantiate_datamodule(config.datamodule) + _algorithm = instantiate_algorithm(config.algorithm, datamodule=datamodule) + + # Note: Here we don't actually do anything with the objects. diff --git a/reference/SUMMARY/index.html b/reference/SUMMARY/index.html new file mode 100644 index 00000000..3edbf5b3 --- /dev/null +++ b/reference/SUMMARY/index.html @@ -0,0 +1,2509 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Reference 🤓 - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/algorithms/callbacks/classification_metrics/index.html b/reference/project/algorithms/callbacks/classification_metrics/index.html new file mode 100644 index 00000000..6b4d3762 --- /dev/null +++ b/reference/project/algorithms/callbacks/classification_metrics/index.html @@ -0,0 +1,2720 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Classification metrics - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Classification metrics

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ ClassificationOutputs + + +#

+ + +
+

+ Bases: TypedDict

+ + +

The outputs that should be minimally returned from the training/val/test_step of classification LightningModules so that metrics can be added automatically by the ClassificationMetricsCallback.

+ + + + + + + + + +
+ + + + + + + +
+ + + +

+ loss + + + + instance-attribute + + +#

+
loss: NotRequired[Tensor | float]
+
+ +
+ +

The loss at this step.

+
+ +
+ +
+ + + +

+ logits + + + + instance-attribute + + +#

+
logits: Required[Tensor]
+
+ +
+ +

The un-normalized logits.

+
+ +
+ +
+ + + +

+ y + + + + instance-attribute + + +#

+
y: Required[Tensor]
+
+ +
+ +

The class labels.
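For illustration, here is a minimal, hedged sketch of a training_step that returns these keys so the callback can add its metrics; the class and network below are placeholders, not part of the template:

```python
import torch
import torch.nn.functional as F
from lightning import LightningModule


class MyClassifier(LightningModule):  # hypothetical example, for illustration only
    def __init__(self, network: torch.nn.Module):
        super().__init__()
        self.network = network

    def training_step(self, batch: tuple[torch.Tensor, torch.Tensor], batch_idx: int):
        x, y = batch
        logits = self.network(x)            # un-normalized class scores
        loss = F.cross_entropy(logits, y)
        # Returning these keys matches the ClassificationOutputs TypedDict,
        # so ClassificationMetricsCallback can add its metrics automatically.
        return {"loss": loss, "logits": logits, "y": y}
```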

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ ClassificationMetricsCallback + + +#

+ + +
+

+ Bases: Callback

+ + +

Callback that adds classification metrics to a LightningModule.

+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/algorithms/callbacks/index.html b/reference/project/algorithms/callbacks/index.html new file mode 100644 index 00000000..b216bc34 --- /dev/null +++ b/reference/project/algorithms/callbacks/index.html @@ -0,0 +1,2509 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Callbacks - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Callbacks

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ ClassificationMetricsCallback + + +#

+ + +
+

+ Bases: Callback

+ + +

Callback that adds classification metrics to a LightningModule.

+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/algorithms/callbacks/samples_per_second/index.html b/reference/project/algorithms/callbacks/samples_per_second/index.html new file mode 100644 index 00000000..fde7dc52 --- /dev/null +++ b/reference/project/algorithms/callbacks/samples_per_second/index.html @@ -0,0 +1,2510 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Samples per second - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Samples per second

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/algorithms/image_classifier/index.html b/reference/project/algorithms/image_classifier/index.html new file mode 100644 index 00000000..b3675fb8 --- /dev/null +++ b/reference/project/algorithms/image_classifier/index.html @@ -0,0 +1,2771 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Image classifier - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Image classifier

+ +
+ + + + +
+ +

Example of a simple algorithm for image classification.

+

This can be run from the command-line like so:

+
python project/main.py algorithm=image_classifier datamodule=cifar10
+
+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ ImageClassifier + + +#

+ + +
+

+ Bases: LightningModule

+ + +

Example learning algorithm for image classification.

+ + + + + + + + + +
+ + + + + + + + + +
+ + +

+ __init__ + + +#

+
__init__(
+    datamodule: ImageClassificationDataModule,
+    network: HydraConfigFor[Module],
+    optimizer: HydraConfigFor[partial[Optimizer]],
+    init_seed: int = 42,
+)
+
+ +
+ +

Create a new instance of the algorithm.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
+ datamodule + + ImageClassificationDataModule + +
+

Object used to load train/val/test data. +See the lightning docs for LightningDataModule +for more info.

+
+
+ required +
+ network + + HydraConfigFor[Module] + +
+

The config of the network to instantiate and train.

+
+
+ required +
+ optimizer + + HydraConfigFor[partial[Optimizer]] + +
+

The config for the Optimizer. Instantiating this will return a function (a functools.partial) that will create the Optimizer given the hyper-parameters.

+
+
+ required +
+ init_seed + + int + +
+

The seed to use when initializing the weights of the network.

+
+
+ 42 +
+ +
+ +
+ +
+ + +

+ forward + + +#

+
forward(input: Tensor) -> Tensor
+
+ +
+ +

Forward pass of the network.

+ +
+ +
+ +
+ + +

+ configure_optimizers + + +#

+
configure_optimizers()
+
+ +
+ +

Creates the optimizers.

+

See lightning.pytorch.core.LightningModule.configure_optimizers for more information.

+ +
+ +
+ +
+ + +

+ configure_callbacks + + +#

+
configure_callbacks() -> Sequence[Callback] | Callback
+
+ +
+ +

Creates callbacks to be used by default during training.

+ +
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/algorithms/image_classifier_test/index.html b/reference/project/algorithms/image_classifier_test/index.html new file mode 100644 index 00000000..75004091 --- /dev/null +++ b/reference/project/algorithms/image_classifier_test/index.html @@ -0,0 +1,2611 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Image classifier test - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Image classifier test

+ +
+ + + + +
+ +

Example showing how the test suite can be used to add tests for a new algorithm.

+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ TestImageClassifier + + +#

+ + +
+

+ Bases: LightningModuleTests[ImageClassifier]

+ + +

Tests for the ImageClassifier.

+

This runs all the tests included in the base class, with the given parametrizations:

+
    +
  • algorithm_config will take the value "image_classifier"
      +
    • This is because there is an image_classifier.yaml config file in project/configs/algorithms + whose _target_ is the ImageClassifier.
    • +
    +
  • +
  • datamodule_config will take these values: ['cifar10', 'fashion_mnist', 'imagenet', 'inaturalist', 'mnist']
      +
    • These are all the configs whose target is an ImageClassificationDataModule.
    • +
    +
  • +
  • Similarly, network_config will be parametrized by the names of all configs which produce an nn.Module, + except those that would create a PreTrainedModel from HuggingFace.
      +
    • This is currently the easiest way for us to say "any network for image classification".
    • +
    +
  • +
+

Take a look at the LightningModuleTests class if you want to see the actual test code.

+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ + +
+ + +

+ test_example_experiment_defaults + + +#

+
test_example_experiment_defaults(
+    experiment_config: Config,
+) -> None
+
+ +
+ +

Test to check that the datamodule is required (even when just an algorithm is set?!).

+ +
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/algorithms/index.html b/reference/project/algorithms/index.html new file mode 100644 index 00000000..b24123e8 --- /dev/null +++ b/reference/project/algorithms/index.html @@ -0,0 +1,2953 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Algorithms - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Algorithms

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ ImageClassifier + + +#

+ + +
+

+ Bases: LightningModule

+ + +

Example learning algorithm for image classification.

+ + + + + + + + + +
+ + + + + + + + + +
+ + +

+ __init__ + + +#

+
__init__(
+    datamodule: ImageClassificationDataModule,
+    network: HydraConfigFor[Module],
+    optimizer: HydraConfigFor[partial[Optimizer]],
+    init_seed: int = 42,
+)
+
+ +
+ +

Create a new instance of the algorithm.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
+ datamodule + + ImageClassificationDataModule + +
+

Object used to load train/val/test data. +See the lightning docs for LightningDataModule +for more info.

+
+
+ required +
+ network + + HydraConfigFor[Module] + +
+

The config of the network to instantiate and train.

+
+
+ required +
+ optimizer + + HydraConfigFor[partial[Optimizer]] + +
+

The config for the Optimizer. Instantiating this will return a function (a functools.partial) that will create the Optimizer given the hyper-parameters.

+
+
+ required +
+ init_seed + + int + +
+

The seed to use when initializing the weights of the network.

+
+
+ 42 +
+ +
+ +
+ +
+ + +

+ forward + + +#

+
forward(input: Tensor) -> Tensor
+
+ +
+ +

Forward pass of the network.

+ +
+ +
+ +
+ + +

+ configure_optimizers + + +#

+
configure_optimizers()
+
+ +
+ +

Creates the optimizers.

+

See lightning.pytorch.core.LightningModule.configure_optimizers for more information.

+ +
+ +
+ +
+ + +

+ configure_callbacks + + +#

+
configure_callbacks() -> Sequence[Callback] | Callback
+
+ +
+ +

Creates callbacks to be used by default during training.

+ +
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ JaxImageClassifier + + +#

+ + +
+

+ Bases: LightningModule

+ + +

Example of a learning algorithm (LightningModule) that uses Jax.

+

In this case, the network is a flax.linen.Module whose forward and backward passes are written in Jax, while the loss function is computed in PyTorch.

+ + + + + + + + + +
+ + + + + + + + + +
+ + +

+ configure_optimizers + + +#

+
configure_optimizers()
+
+ +
+ +

Creates the optimizers.

+

See lightning.pytorch.core.LightningModule.configure_optimizers for more information.

+ +
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ JaxRLExample + + +#

+ + +
+

+ Bases: PyTreeNode, JaxModule[PPOState[TEnvState], TrajectoryWithLastObs, EvalMetrics], Generic[TEnvState, TEnvParams]

+ + +

Example of an RL algorithm written in Jax: PPO, based on rejax.PPO.

+

Differences w.r.t. rejax.PPO:#

+
    +
  • The state / hparams are split into different, fully-typed structs:
      +
    • The algorithm state is in a typed PPOState struct (vs an untyped, + dynamically-generated struct in rejax).
    • +
    • The hyper-parameters are in a typed PPOHParams struct.
    • +
    • The state variables related to the collection of data from the environment are grouped in a TrajectoryCollectionState instead of everything being bunched up together.
        +
      • This makes it easier to call the collect_episodes function with just what it needs.
      • +
      +
    • +
    +
  • +
  • The seeds for the networks and the environment data collection are separated.
  • +
+

The logic is exactly the same: The losses / updates are computed in the exact same way.

+ + + + + + + + + +
+ + + + + + + + + +
+ + +

+ training_step + + +#

+
training_step(
+    batch_idx: int,
+    ts: PPOState[TEnvState],
+    batch: TrajectoryWithLastObs,
+)
+
+ +
+ +

Training step in pure jax.

+ +
+ +
+ +
+ + +

+ train + + +#

+
train(
+    rng: Array,
+    train_state: PPOState[TEnvState] | None = None,
+    skip_initial_evaluation: bool = False,
+) -> tuple[PPOState[TEnvState], EvalMetrics]
+
+ +
+ +

Full training loop in jax.

+

This is only here to match the API of rejax.PPO.train. This doesn't get called when using +the JaxTrainer, since JaxTrainer.fit already does the same thing, but also with support +for some JaxCallbacks (as well as some lightning.Callbacks!).

+

Unfolded version of rejax.PPO.train.

+ +
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ NoOp + + +#

+ + +
+

+ Bases: LightningModule

+ + +

Algorithm that does no learning and is used to benchmark the dataloading speed.

+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ TextClassifier + + +#

+ + +
+

+ Bases: LightningModule

+ + +

Example of a lightning module used to train a huggingface model for text classification.

+ + + + + + + + + +
+ + + + + + + + + +
+ + +

+ configure_optimizers + + +#

+
configure_optimizers()
+
+ +
+ +

Prepare optimizer and schedule (linear warmup and decay)

+ +
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/algorithms/jax_image_classifier/index.html b/reference/project/algorithms/jax_image_classifier/index.html new file mode 100644 index 00000000..6c46990c --- /dev/null +++ b/reference/project/algorithms/jax_image_classifier/index.html @@ -0,0 +1,2652 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Jax image classifier - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Jax image classifier

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ JaxCNN + + +#

+ + +
+

+ Bases: Module

+ + +

A simple CNN model.

+

Taken from https://flax.readthedocs.io/en/latest/quick_start.html#define-network

+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ JaxImageClassifier + + +#

+ + +
+

+ Bases: LightningModule

+ + +

Example of a learning algorithm (LightningModule) that uses Jax.

+

In this case, the network is a flax.linen.Module whose forward and backward passes are written in Jax, while the loss function is computed in PyTorch.

+ + + + + + + + + +
+ + + + + + + + + +
+ + +

+ configure_optimizers + + +#

+
configure_optimizers()
+
+ +
+ +

Creates the optimizers.

+

See lightning.pytorch.core.LightningModule.configure_optimizers for more information.

+ +
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/algorithms/jax_image_classifier_test/index.html b/reference/project/algorithms/jax_image_classifier_test/index.html new file mode 100644 index 00000000..a9484477 --- /dev/null +++ b/reference/project/algorithms/jax_image_classifier_test/index.html @@ -0,0 +1,2592 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Jax image classifier test - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Jax image classifier test

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ TestJaxImageClassifier + + +#

+ + +
+

+ Bases: LightningModuleTests[JaxImageClassifier]

+ + +

Tests for the Jax image classification algorithm.

+

This simply reuses all the tests in the base test suite, specifying that the datamodule +passed to the JaxImageClassifier should be for image classification and the network should be a +flax.linen.Module.

+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ + +
+ + +

+ test_demo + + +#

+
test_demo(tmp_path: Path)
+
+ +
+ +

Test the demo at the bottom of the module.

+ +
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/algorithms/jax_ppo/index.html b/reference/project/algorithms/jax_ppo/index.html new file mode 100644 index 00000000..dae2aa8c --- /dev/null +++ b/reference/project/algorithms/jax_ppo/index.html @@ -0,0 +1,3008 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Jax ppo - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Jax ppo

+ +
+ + + + +
+ +

Example of an RL algorithm (PPO) written entirely in Jax.

+

This is based on rejax.PPO. +See the JaxRLExample class for a description of the differences w.r.t. rejax.PPO.

+ + + + + + + + +
+ + + + + + + +
+ + + +

+ TEnvParams + + + + module-attribute + + +#

+
TEnvParams = TypeVar(
+    "TEnvParams", bound=EnvParams, default=EnvParams
+)
+
+ +
+ +

Type variable for the env params (gymnax.EnvParams).

+
+ +
+ + +
+ + + +

+ Trajectory + + +#

+ + +
+

+ Bases: PyTreeNode

+ + +

A sequence of interactions between an agent and an environment.

+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ TrajectoryWithLastObs + + +#

+ + +
+

+ Bases: PyTreeNode

+ + +

Trajectory with the last observation and whether the last step is the end of an episode.

+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ AdvantageMinibatch + + +#

+ + +
+

+ Bases: PyTreeNode

+ + +

Annotated trajectories with advantages and targets for the critic.

+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ TrajectoryCollectionState + + +#

+ + +
+

+ Bases: Generic[TEnvState], PyTreeNode

+ + +

Struct containing the state related to the collection of data from the environment.

+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ PPOState + + +#

+ + +
+

+ Bases: Generic[TEnvState], PyTreeNode

+ + +

Contains all the state of the JaxRLExample algorithm.

+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ PPOHParams + + +#

+ + +
+

+ Bases: PyTreeNode

+ + +

Hyper-parameters for this PPO example.

+

These are taken from the rejax.PPO algorithm class.

+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ JaxRLExample + + +#

+ + +
+

+ Bases: PyTreeNode, JaxModule[PPOState[TEnvState], TrajectoryWithLastObs, EvalMetrics], Generic[TEnvState, TEnvParams]

+ + +

Example of an RL algorithm written in Jax: PPO, based on rejax.PPO.

+

Differences w.r.t. rejax.PPO:#

+
    +
  • The state / hparams are split into different, fully-typed structs:
      +
    • The algorithm state is in a typed PPOState struct (vs an untyped, + dynamically-generated struct in rejax).
    • +
    • The hyper-parameters are in a typed PPOHParams struct.
    • +
    • The state variables related to the collection of data from the environment are grouped in a TrajectoryCollectionState instead of everything being bunched up together.
        +
      • This makes it easier to call the collect_episodes function with just what it needs.
      • +
      +
    • +
    +
  • +
  • The seeds for the networks and the environment data collection are separated.
  • +
+

The logic is exactly the same: The losses / updates are computed in the exact same way.

+ + + + + + + + + +
+ + + + + + + + + +
+ + +

+ training_step + + +#

+
training_step(
+    batch_idx: int,
+    ts: PPOState[TEnvState],
+    batch: TrajectoryWithLastObs,
+)
+
+ +
+ +

Training step in pure jax.

+ +
+ +
+ +
+ + +

+ train + + +#

+
train(
+    rng: Array,
+    train_state: PPOState[TEnvState] | None = None,
+    skip_initial_evaluation: bool = False,
+) -> tuple[PPOState[TEnvState], EvalMetrics]
+
+ +
+ +

Full training loop in jax.

+

This is only here to match the API of rejax.PPO.train. This doesn't get called when using +the JaxTrainer, since JaxTrainer.fit already does the same thing, but also with support +for some JaxCallbacks (as well as some lightning.Callbacks!).

+

Unfolded version of rejax.PPO.train.

+ +
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/algorithms/jax_ppo_test/index.html b/reference/project/algorithms/jax_ppo_test/index.html new file mode 100644 index 00000000..fc5a181e --- /dev/null +++ b/reference/project/algorithms/jax_ppo_test/index.html @@ -0,0 +1,2650 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Jax ppo test - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Jax ppo test

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ PPOLightningModule + + +#

+ + +
+

+ Bases: LightningModule

+ + +

Uses the same code as JaxRLExample, but the training loop is run with pytorch-lightning.

+

This is currently only meant to be used to compare the difference between the fully-jitted training loop and Lightning.

+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ RlThroughputCallback + + +#

+ + +
+

+ Bases: MeasureSamplesPerSecondCallback

+ + +

A callback to measure the throughput of RL algorithms.

+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ + +
+ + +

+ test_rejax + + +#

+
test_rejax(
+    rng: PRNGKey,
+    results_rejax: tuple[PPO, Any, EvalMetrics],
+    tensor_regression: TensorRegressionFixture,
+    original_datadir: Path,
+    seed: int | Sequence[int],
+)
+
+ +
+ +

Train rejax.PPO with the same parameters.

+ +
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/algorithms/llm_finetuning/index.html b/reference/project/algorithms/llm_finetuning/index.html new file mode 100644 index 00000000..8c28eab7 --- /dev/null +++ b/reference/project/algorithms/llm_finetuning/index.html @@ -0,0 +1,2895 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Llm finetuning - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Llm finetuning

+ +
+ + + + +
+ +

Example: fine-tuning a language model (GPT, GPT-2, CTRL, OPT, etc.) on a text dataset.

+

Large chunks of the code here are taken from this example script in the transformers GitHub repository.

+

If you haven't already, you should definitely check out this walkthrough of that script from the HuggingFace docs.

+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ NetworkConfig + + +#

+ + +
+ + +

Configuration options related to the choice of network.

+

When instantiated by Hydra, this calls the target function passed to the decorator. In this case, this pulls the pretrained network weights from the HuggingFace model hub.

+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ TokenizerConfig + + +#

+ + +
+ + +

Configuration options for the tokenizer.

+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ DatasetConfig + + + + dataclass + + +#

+ + +
+ + +

Configuration options related to the dataset preparation.

+ + + + + + + + + +
+ + + + + + + +
+ + + +

+ dataset_path + + + + instance-attribute + + +#

+
dataset_path: str
+
+ +
+ +

Name of the dataset "family"?

+

For example, to load "wikitext/wikitext-103-v1", this would be "wikitext".

+
+ +
+ +
+ + + +

+ dataset_name + + + + class-attribute + instance-attribute + + +#

+
dataset_name: str | None = None
+
+ +
+ +

Name of the specific dataset?

+

For example, to load "wikitext/wikitext-103-v1", this would be "wikitext-103-v1".

+
+ +
+ +
+ + + +

+ validation_split_percentage + + + + class-attribute + instance-attribute + + +#

+
validation_split_percentage: int = 10
+
+ +
+ +

Percentage of the training dataset to use for validation if there isn't already a validation split.
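As a rough illustration (assuming the standard HuggingFace datasets API that this example builds on), dataset_path and dataset_name map onto the two positional arguments of datasets.load_dataset:

```python
from datasets import load_dataset

# Example values from the docstrings above:
# dataset_path="wikitext", dataset_name="wikitext-103-v1"
raw_datasets = load_dataset("wikitext", "wikitext-103-v1")
print(raw_datasets)  # a DatasetDict with its train/validation/test splits
```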

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ LLMFinetuningExample + + +#

+ + +
+

+ Bases: LightningModule

+ + +

Example of a lightning module used to fine-tune a huggingface model.

+ + + + + + + + + +
+ + + + + + + + + +
+ + +

+ setup + + +#

+
setup(stage: str)
+
+ +
+ +

Hook from Lightning that is called at the start of training, validation and testing.

+

TODO: Later perhaps we could do the preprocessing in a distributed manner like this: +https://discuss.huggingface.co/t/how-to-save-datasets-as-distributed-with-save-to-disk/25674/2

+ +
+ +
+ +
+ + +

+ configure_optimizers + + +#

+
configure_optimizers()
+
+ +
+ +

Prepare optimizer and schedule (linear warmup and decay)

+ +
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/algorithms/llm_finetuning_test/index.html b/reference/project/algorithms/llm_finetuning_test/index.html new file mode 100644 index 00000000..c1df9e56 --- /dev/null +++ b/reference/project/algorithms/llm_finetuning_test/index.html @@ -0,0 +1,2563 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Llm finetuning test - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Llm finetuning test

+ +
+ + + + +
+ +

Unit tests for the llm finetuning example.

+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ TestLLMFinetuningExample + + +#

+ + +
+

+ Bases: LightningModuleTests[LLMFinetuningExample]

+ + +

Tests for the LLM fine-tuning example.

+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/algorithms/no_op/index.html b/reference/project/algorithms/no_op/index.html new file mode 100644 index 00000000..a78dd1ad --- /dev/null +++ b/reference/project/algorithms/no_op/index.html @@ -0,0 +1,2561 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + No op - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

No op

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ NoOp + + +#

+ + +
+

+ Bases: LightningModule

+ + +

Algorithm that does no learning and is used to benchmark the dataloading speed.

+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/algorithms/testsuites/index.html b/reference/project/algorithms/testsuites/index.html new file mode 100644 index 00000000..eb2bcf8c --- /dev/null +++ b/reference/project/algorithms/testsuites/index.html @@ -0,0 +1,2764 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Testsuites - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Testsuites

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ LightningModuleTests + + +#

+ + +
+

+ Bases: Generic[AlgorithmType], ABC

+ + +

Suite of generic tests for a LightningModule.

+

Simply inherit from this class and decorate the class with the appropriate markers to get a set +of decent unit tests that should apply to any LightningModule.

+

See the project.algorithms.image_classifier_test module for an example.
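As a hedged, minimal sketch of that pattern (the algorithm module and config name below are hypothetical, not part of the template):

```python
import pytest

from project.algorithms.testsuites.lightning_module_tests import LightningModuleTests
from my_package.my_algorithm import MyAlgorithm  # hypothetical algorithm under test


# "my_algorithm" is a hypothetical config name under project/configs/algorithm.
@pytest.mark.parametrize("algorithm_config", ["my_algorithm"], indirect=True)
class TestMyAlgorithm(LightningModuleTests[MyAlgorithm]):
    """Reuses the generic initialization / forward / backward reproducibility tests."""
```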

+

Other ideas:
- pytest-benchmark for regression tests on forward / backward pass / training step speed
- pytest-profiling for profiling the training step? (pytorch variant?)
- Dataset splits: check some basic stats about the train/val/test inputs, are they somewhat similar?
- Define the input as a space, check that the dataset samples are in that space and not too many samples are statistically OOD?

+ + + + + + + + + +
+ + + + + + + + + +
+ + +

+ experiment_config + + +#

+
experiment_config(
+    experiment_dictconfig: DictConfig,
+) -> Config
+
+ +
+ +

The experiment configuration, with all interpolations resolved.

+ +
+ +
+ +
+ + +

+ algorithm + + +#

+
algorithm(
+    experiment_config: Config,
+    datamodule: LightningDataModule | None,
+    trainer: Trainer | JaxTrainer,
+    device: device,
+)
+
+ +
+ +

Fixture that creates the "algorithm" (a +LightningModule).

+ +
+ +
+ +
+ + +

+ make_torch_deterministic + + +#

+
make_torch_deterministic()
+
+ +
+ +

Set torch to deterministic mode for unit tests that use the tensor_regression +fixture.

+ +
+ +
+ +
+ + +

+ seed + + +#

+
seed(request: FixtureRequest)
+
+ +
+ +

Fixture that seeds everything for reproducibility and yields the random seed used.

+ +
+ +
+ +
+ + +

+ training_step_content + + +#

+
training_step_content(
+    datamodule: LightningDataModule | None,
+    algorithm: AlgorithmType,
+    seed: int,
+    accelerator: str,
+    devices: int | list[int],
+    tmp_path_factory: TempPathFactory,
+)
+
+ +
+ +

Check that the backward pass is reproducible given the same weights, inputs and random +seed.

+ +
+ +
+ +
+ + +

+ test_initialization_is_reproducible + + +#

+
test_initialization_is_reproducible(
+    training_step_content: tuple[
+        AlgorithmType,
+        GetStuffFromFirstTrainingStep,
+        list[Any],
+        list[Any],
+    ],
+    tensor_regression: TensorRegressionFixture,
+    accelerator: str,
+)
+
+ +
+ +

Check that the network initialization is reproducible given the same random seed.

+ +
+ +
+ +
+ + +

+ test_forward_pass_is_reproducible + + +#

+
test_forward_pass_is_reproducible(
+    training_step_content: tuple[
+        AlgorithmType,
+        GetStuffFromFirstTrainingStep,
+        list[Any],
+        list[Any],
+    ],
+    tensor_regression: TensorRegressionFixture,
+)
+
+ +
+ +

Check that the forward pass is reproducible given the same input and random seed.

+ +
+ +
+ +
+ + +

+ test_backward_pass_is_reproducible + + +#

+
test_backward_pass_is_reproducible(
+    training_step_content: tuple[
+        AlgorithmType,
+        GetStuffFromFirstTrainingStep,
+        list[Any],
+        list[Any],
+    ],
+    tensor_regression: TensorRegressionFixture,
+    accelerator: str,
+)
+
+ +
+ +

Check that the backward pass is reproducible given the same weights, inputs and random +seed.

+ +
+ +
+ +
+ + +

+ forward_pass_input + + +#

+
forward_pass_input(
+    training_batch: PyTree[Tensor], device: device
+)
+
+ +
+ +

Extracts the model input from a batch of data coming from the dataloader.

+

Overwrite this if your batches are not tuples of tensors (i.e. if your algorithm isn't a +simple supervised learning algorithm like the example).

+ +
+ +
+ +
+ + +

+ do_one_step_of_training + + +#

+
do_one_step_of_training(
+    algorithm: AlgorithmType,
+    datamodule: LightningDataModule | None,
+    accelerator: str,
+    devices: int | list[int] | Literal["auto"],
+    callbacks: list[Callback],
+    tmp_path: Path,
+)
+
+ +
+ +

Performs one step of training.

+

Overwrite this if you train your algorithm differently.

+ +
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/algorithms/testsuites/lightning_module_tests/index.html b/reference/project/algorithms/testsuites/lightning_module_tests/index.html new file mode 100644 index 00000000..358048cf --- /dev/null +++ b/reference/project/algorithms/testsuites/lightning_module_tests/index.html @@ -0,0 +1,3010 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Lightning module tests - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Lightning module tests

+ +
+ + + + +
+ +

Suite of tests for a LightningModule.

+

See the project.algorithms.image_classifier_test module for an example of how to use this.

+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ LightningModuleTests + + +#

+ + +
+

+ Bases: Generic[AlgorithmType], ABC

+ + +

Suite of generic tests for a LightningModule.

+

Simply inherit from this class and decorate the class with the appropriate markers to get a set +of decent unit tests that should apply to any LightningModule.

+

See the project.algorithms.image_classifier_test module for an example.

+

Other ideas:
- pytest-benchmark for regression tests on forward / backward pass / training step speed
- pytest-profiling for profiling the training step? (pytorch variant?)
- Dataset splits: check some basic stats about the train/val/test inputs, are they somewhat similar?
- Define the input as a space, check that the dataset samples are in that space and not too many samples are statistically OOD?

+ + + + + + + + + +
+ + + + + + + + + +
+ + +

+ experiment_config + + +#

+
experiment_config(
+    experiment_dictconfig: DictConfig,
+) -> Config
+
+ +
+ +

The experiment configuration, with all interpolations resolved.

+ +
+ +
+ +
+ + +

+ algorithm + + +#

+
algorithm(
+    experiment_config: Config,
+    datamodule: LightningDataModule | None,
+    trainer: Trainer | JaxTrainer,
+    device: device,
+)
+
+ +
+ +

Fixture that creates the "algorithm" (a +LightningModule).

+ +
+ +
+ +
+ + +

+ make_torch_deterministic + + +#

+
make_torch_deterministic()
+
+ +
+ +

Set torch to deterministic mode for unit tests that use the tensor_regression +fixture.

+ +
+ +
+ +
+ + +

+ seed + + +#

+
seed(request: FixtureRequest)
+
+ +
+ +

Fixture that seeds everything for reproducibility and yields the random seed used.

+ +
+ +
+ +
+ + +

+ training_step_content + + +#

+
training_step_content(
+    datamodule: LightningDataModule | None,
+    algorithm: AlgorithmType,
+    seed: int,
+    accelerator: str,
+    devices: int | list[int],
+    tmp_path_factory: TempPathFactory,
+)
+
+ +
+ +

Check that the backward pass is reproducible given the same weights, inputs and random +seed.

+ +
+ +
+ +
+ + +

+ test_initialization_is_reproducible + + +#

+
test_initialization_is_reproducible(
+    training_step_content: tuple[
+        AlgorithmType,
+        GetStuffFromFirstTrainingStep,
+        list[Any],
+        list[Any],
+    ],
+    tensor_regression: TensorRegressionFixture,
+    accelerator: str,
+)
+
+ +
+ +

Check that the network initialization is reproducible given the same random seed.

+ +
+ +
+ +
+ + +

+ test_forward_pass_is_reproducible + + +#

+
test_forward_pass_is_reproducible(
+    training_step_content: tuple[
+        AlgorithmType,
+        GetStuffFromFirstTrainingStep,
+        list[Any],
+        list[Any],
+    ],
+    tensor_regression: TensorRegressionFixture,
+)
+
+ +
+ +

Check that the forward pass is reproducible given the same input and random seed.

+ +
+ +
+ +
+ + +

+ test_backward_pass_is_reproducible + + +#

+
test_backward_pass_is_reproducible(
+    training_step_content: tuple[
+        AlgorithmType,
+        GetStuffFromFirstTrainingStep,
+        list[Any],
+        list[Any],
+    ],
+    tensor_regression: TensorRegressionFixture,
+    accelerator: str,
+)
+
+ +
+ +

Check that the backward pass is reproducible given the same weights, inputs and random +seed.

+ +
+ +
+ +
+ + +

+ forward_pass_input + + +#

+
forward_pass_input(
+    training_batch: PyTree[Tensor], device: device
+)
+
+ +
+ +

Extracts the model input from a batch of data coming from the dataloader.

+

Overwrite this if your batches are not tuples of tensors (i.e. if your algorithm isn't a +simple supervised learning algorithm like the example).

+ +
+ +
+ +
+ + +

+ do_one_step_of_training + + +#

+
do_one_step_of_training(
+    algorithm: AlgorithmType,
+    datamodule: LightningDataModule | None,
+    accelerator: str,
+    devices: int | list[int] | Literal["auto"],
+    callbacks: list[Callback],
+    tmp_path: Path,
+)
+
+ +
+ +

Performs one step of training.

+

Overwrite this if you train your algorithm differently.

+ +
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ GetStuffFromFirstTrainingStep + + +#

+ + +
+

+ Bases: Callback

+ + +

Callback used in tests to get things from the first call to training_step.

+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ + +
+ + +

+ convert_list_and_tuples_to_dicts + + +#

+
convert_list_and_tuples_to_dicts(value: Any) -> Any
+
+ +
+ +

Converts all lists and tuples in a nested structure to dictionaries.

+
+
+
+

>>> convert_list_and_tuples_to_dicts([1, 2, 3])
{'0': 1, '1': 2, '2': 3}
>>> convert_list_and_tuples_to_dicts((1, 2, 3))
{'0': 1, '1': 2, '2': 3}
>>> convert_list_and_tuples_to_dicts({"a": [1, 2, 3], "b": (4, 5, 6)})
{'a': {'0': 1, '1': 2, '2': 3}, 'b': {'0': 4, '1': 5, '2': 6}}

+
+
+
+ +
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/algorithms/text_classifier/index.html b/reference/project/algorithms/text_classifier/index.html new file mode 100644 index 00000000..03d3330a --- /dev/null +++ b/reference/project/algorithms/text_classifier/index.html @@ -0,0 +1,2595 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Text classifier - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Text classifier

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ TextClassifier + + +#

+ + +
+

+ Bases: LightningModule

+ + +

Example of a lightning module used to train a huggingface model for text classification.

+ + + + + + + + + +
+ + + + + + + + + +
+ + +

+ configure_optimizers + + +#

+
configure_optimizers()
+
+ +
+ +

Prepare optimizer and schedule (linear warmup and decay)

+ +
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/algorithms/text_classifier_test/index.html b/reference/project/algorithms/text_classifier_test/index.html new file mode 100644 index 00000000..efdf9acc --- /dev/null +++ b/reference/project/algorithms/text_classifier_test/index.html @@ -0,0 +1,2600 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Text classifier test - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Text classifier test

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ TestTextClassifier + + +#

+ + +
+

+ Bases: LightningModuleTests[TextClassifier]

+ + +

Tests for the HF example.

+ + + + + + + + + +
+ + + + + + + + + +
+ + +

+ test_overfit_batch + + +#

+
test_overfit_batch(
+    algorithm: TextClassifier,
+    datamodule: TextClassificationDataModule,
+    tmp_path: Path,
+    num_steps: int = 3,
+)
+
+ +
+ +

Test that the loss decreases on a single batch.

+ +
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/configs/algorithm/index.html b/reference/project/configs/algorithm/index.html new file mode 100644 index 00000000..37532c1b --- /dev/null +++ b/reference/project/configs/algorithm/index.html @@ -0,0 +1,2467 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Algorithm - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Algorithm

+ +
+ + + + +
+ +

Configs for algorithms.

+ + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/configs/algorithm/lr_scheduler/index.html b/reference/project/configs/algorithm/lr_scheduler/index.html new file mode 100644 index 00000000..1216b631 --- /dev/null +++ b/reference/project/configs/algorithm/lr_scheduler/index.html @@ -0,0 +1,2498 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Lr scheduler - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Lr scheduler

+ +
+ + + + +
+ +

Configs for learning rate schedulers.

+

You can add configurations either with a config file or in code using +hydra-zen.builds.

+ + + + + + +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/configs/algorithm/network/index.html b/reference/project/configs/algorithm/network/index.html new file mode 100644 index 00000000..740df904 --- /dev/null +++ b/reference/project/configs/algorithm/network/index.html @@ -0,0 +1,2510 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Network - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Network

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/configs/algorithm/optimizer/index.html b/reference/project/configs/algorithm/optimizer/index.html new file mode 100644 index 00000000..4c205b8a --- /dev/null +++ b/reference/project/configs/algorithm/optimizer/index.html @@ -0,0 +1,2535 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Optimizer - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Optimizer

+ +
+ + + + +
+ +

Configurations for optimizers.

+

You can add configurations either with a config file or by registering structured configs in code.

+

Here is an example of how you could register a new configuration in code using +hydra-zen.builds:

+
import hydra_zen
+from torch.optim import Adam  # type: ignore
+
+optimizers_store = hydra_zen.store(group="algorithm/optimizer")
+
+AdamConfig = optimizers_store(
+    hydra_zen.builds(
+        Adam,
+        zen_partial=True,
+        populate_full_signature=True,
+        zen_exclude=["params"],
+        zen_dataclass={"cls_name": "AdamConfig", "frozen": False},
+    ),
+    name="base_adam",
+)
+
+

From the command line, you can select both configs that are yaml files and structured configs (dataclasses).

+

This works the same way as creating config files for each optimizer under configs/algorithm/optimizer. +Config files can also use structured configs in their defaults list.
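To make the zen_partial behaviour concrete, here is a hedged sketch (not taken from the template's code) of what instantiating such a config yields and how it would typically be used:

```python
import hydra_zen
import torch

# Instantiating the AdamConfig registered above returns a functools.partial(Adam, lr=..., ...)
optimizer_partial = hydra_zen.instantiate(AdamConfig)

model = torch.nn.Linear(10, 2)                      # stand-in model, for illustration only
optimizer = optimizer_partial(model.parameters())   # the partial is called with the parameters
assert isinstance(optimizer, torch.optim.Adam)
```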

+ + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/configs/config/index.html b/reference/project/configs/config/index.html new file mode 100644 index 00000000..519e7ba9 --- /dev/null +++ b/reference/project/configs/config/index.html @@ -0,0 +1,2777 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Config - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Config

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ Config + + + + dataclass + + +#

+ + +
+ + +

The options required for a run. This dataclass acts as a structure for the Hydra configs.

+

For more info, see https://hydra.cc/docs/tutorials/structured_config/schema/

+ + + + + + + + + +
+ + + + + + + +
+ + + +

+ algorithm + + + + instance-attribute + + +#

+
algorithm: Any
+
+ +
+ +

Configuration for the algorithm (a +LightningModule).

+

It is suggested for this class to accept a datamodule and network as arguments. The +instantiated datamodule and network will be passed to the algorithm's constructor.

+

For more info, see the instantiate_algorithm function.

+
+ +
+ +
+ + + +

+ datamodule + + + + class-attribute + instance-attribute + + +#

+
datamodule: Optional[Any] = None
+
+ +
+ +

Configuration for the datamodule (dataset + transforms + dataloader creation).

+

This should normally create a LightningDataModule. +See the MNISTDataModule for an example.

+
+ +
+ +
+ + + +

+ trainer + + + + class-attribute + instance-attribute + + +#

+
trainer: dict = field(default_factory=dict)
+
+ +
+ +

Keyword arguments for the Trainer constructor.

+
+ +
+ +
+ + + +

+ log_level + + + + class-attribute + instance-attribute + + +#

+
log_level: str = 'info'
+
+ +
+ +

Logging level.

+
+ +
+ +
+ + + +

+ seed + + + + class-attribute + instance-attribute + + +#

+
seed: int = field(
+    default_factory=lambda: randint(0, int(100000.0))
+)
+
+ +
+ +

Random seed for reproducibility.

+

If None, a random seed is generated.

+
+ +
+ +
+ + + +

+ ckpt_path + + + + class-attribute + instance-attribute + + +#

+
ckpt_path: str | None = None
+
+ +
+ +

Path to a checkpoint to load the training state and resume the training run.

+

This is the same as the ckpt_path argument in the lightning.Trainer.fit method.

+
+ +
+ + + + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/configs/config_test/index.html b/reference/project/configs/config_test/index.html new file mode 100644 index 00000000..64c22f32 --- /dev/null +++ b/reference/project/configs/config_test/index.html @@ -0,0 +1,2540 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Config test - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Config test

+ +
+ + + + +
+ +

TODO: Add tests for the configurations?

+ + + + + + + + +
+ + + + + + + + + +
+ + +

+ test_can_use_algo_that_doesnt_use_a_datamodule + + +#

+
test_can_use_algo_that_doesnt_use_a_datamodule(
+    register_dummy_configs: None, algorithm: LightningModule
+)
+
+ +
+ +

Test that we can use an algorithm without a datamodule.

+ +
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/configs/datamodule/index.html b/reference/project/configs/datamodule/index.html new file mode 100644 index 00000000..3e599edd --- /dev/null +++ b/reference/project/configs/datamodule/index.html @@ -0,0 +1,2508 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Datamodule - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Datamodule

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/configs/index.html b/reference/project/configs/index.html new file mode 100644 index 00000000..72bd3807 --- /dev/null +++ b/reference/project/configs/index.html @@ -0,0 +1,2684 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Configs - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Configs

+ +
+ + + + +
+ +

All the configuration classes for the project.

+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ Config + + + + dataclass + + +#

+ + +
+ + +

The options required for a run. This dataclass acts as a structure for the Hydra configs.

+

For more info, see https://hydra.cc/docs/tutorials/structured_config/schema/

+ + + + + + + + + +
+ + + + + + + +
+ + + +

+ algorithm + + + + instance-attribute + + +#

+
algorithm: Any
+
+ +
+ +

Configuration for the algorithm (a +LightningModule).

+

It is suggested for this class to accept a datamodule and network as arguments. The +instantiated datamodule and network will be passed to the algorithm's constructor.

+

For more info, see the instantiate_algorithm function.

+
+ +
+ +
+ + + +

+ datamodule + + + + class-attribute + instance-attribute + + +#

+
datamodule: Optional[Any] = None
+
+ +
+ +

Configuration for the datamodule (dataset + transforms + dataloader creation).

+

This should normally create a LightningDataModule. +See the MNISTDataModule for an example.

+
+ +
+ +
+ + + +

+ trainer + + + + class-attribute + instance-attribute + + +#

+
trainer: dict = field(default_factory=dict)
+
+ +
+ +

Keyword arguments for the Trainer constructor.

+
+ +
+ +
+ + + +

+ log_level + + + + class-attribute + instance-attribute + + +#

+
log_level: str = 'info'
+
+ +
+ +

Logging level.

+
+ +
+ +
+ + + +

+ seed + + + + class-attribute + instance-attribute + + +#

+
seed: int = field(
+    default_factory=lambda: randint(0, int(100000.0))
+)
+
+ +
+ +

Random seed for reproducibility.

+

If None, a random seed is generated.

+
+ +
+ +
+ + + +

+ ckpt_path + + + + class-attribute + instance-attribute + + +#

+
ckpt_path: str | None = None
+
+ +
+ +

Path to a checkpoint to load the training state and resume the training run.

+

This is the same as the ckpt_path argument in the lightning.Trainer.fit method.

+
+ +
+ + + + + +
+ +
+ +
+ + +
+ + +

+ add_configs_to_hydra_store + + +#

+
add_configs_to_hydra_store()
+
+ +
+ +

Adds all configs to the Hydra Config store.

+ +
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/conftest/index.html b/reference/project/conftest/index.html new file mode 100644 index 00000000..012370a2 --- /dev/null +++ b/reference/project/conftest/index.html @@ -0,0 +1,3093 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Conftest - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Conftest

+ +
+ + + + +
+ +

Fixtures and test utilities.

+

This module contains PyTest fixtures that are used +by tests.

+

How this works#

+

Our goal here is to make sure that the way we create networks/datasets/algorithms during tests matches as closely as possible how they are created in a real run, for example when running python project/main.py algorithm=image_classifier.

+

We achieve this like so: all the components of an experiment are created using fixtures. The first fixtures to be invoked are the ones that correspond to the command-line arguments.

+

For example, one of the fixtures which is created first is datamodule_config.

+

The first fixtures to be created are datamodule_config, network_config and algorithm_config, along with the command-line overrides. From these, the experiment_dictconfig is created.

+
---
+title: Fixture dependency graph
+---
+flowchart TD
+datamodule_config[
+    <a href="#project.conftest.datamodule_config">datamodule_config</a>
+] -- 'datamodule=A' --> command_line_arguments
+algorithm_config[
+    <a href="#project.conftest.algorithm_config">algorithm_config</a>
+] -- 'algorithm=B' --> command_line_arguments
+command_line_overrides[
+    <a href="#project.conftest.command_line_overrides">command_line_overrides</a>
+] -- 'seed=123' --> command_line_arguments
+command_line_arguments[
+    <a href="#project.conftest.command_line_arguments">command_line_arguments</a>
+] -- load configs for 'datamodule=A algorithm=B seed=123' --> experiment_dictconfig
+experiment_dictconfig[
+    <a href="#project.conftest.experiment_dictconfig">experiment_dictconfig</a>
+] -- instantiate objects from configs --> experiment_config
+experiment_config[
+    <a href="#project.conftest.experiment_config">experiment_config</a>
+] --> datamodule & algorithm
+datamodule[
+    <a href="#project.conftest.datamodule">datamodule</a>
+] --> algorithm
+algorithm[
+    <a href="#project.conftest.algorithm">algorithm</a>
+] -- is used by --> some_test
+algorithm & datamodule -- is used by --> some_other_test
+ + + + + + + + +
+ + + + + + + + + +
+ + +

+ original_datadir + + +#

+
original_datadir(original_datadir: Path)
+
+ +
+ +

Overwrite the original_datadir fixture value to change where regression files are created.

+

By default, they are in a folder next to the source. Here instead we move them to $SCRATCH if +available, or to a .regression_files folder at the root of the repo otherwise.

+ +
+ +
+ +
+ + +

+ algorithm_config + + +#

+
algorithm_config(request: FixtureRequest) -> str | None
+
+ +
+ +

The algorithm config to use in the experiment, as if algorithm=<value> was passed.

+

This is parametrized with all the configurations for a given algorithm type when using the +included tests, for example as is done in project.algorithms.image_classifier_test.
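For instance, assuming this fixture reads request.param (as is usual for fixtures that take a FixtureRequest), a single test could also pick a specific config through pytest's indirect parametrization; the config name below is only an example:

import pytest


@pytest.mark.parametrize("algorithm_config", ["image_classifier"], indirect=True)
def test_something_with_this_algorithm(algorithm):
    # `algorithm` is built from the `image_classifier` config by the fixtures above.
    assert algorithm is not None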

+ +
+ +
+ +
+ + +

+ datamodule_config + + +#

+
datamodule_config(request: FixtureRequest) -> str | None
+
+ +
+ +

The datamodule config to use in the experiment, as if datamodule=<value> was passed.

+ +
+ +
+ +
+ + +

+ algorithm_network_config + + +#

+
algorithm_network_config(
+    request: FixtureRequest,
+) -> str | None
+
+ +
+ +

The network config to use in the experiment, as in algorithm/network=<value>.

+ +
+ +
+ +
+ + +

+ command_line_arguments + + +#

+
command_line_arguments(
+    algorithm_config: str | None,
+    datamodule_config: str | None,
+    algorithm_network_config: str | None,
+    command_line_overrides: tuple[str, ...],
+    request: FixtureRequest,
+)
+
+ +
+ +

Fixture that returns the command-line arguments that will be passed to Hydra to run the +experiment.

+

The algorithm_config, network_config and datamodule_config values here are parametrized +indirectly by most tests using the project.utils.testutils.run_for_all_configs_of_type +function so that the respective components are created in the same way as they +would be by Hydra in a regular run.

+ +
+ +
+ +
+ + +

+ experiment_dictconfig + + +#

+
experiment_dictconfig(
+    command_line_arguments: tuple[str, ...],
+    tmp_path_factory: TempPathFactory,
+) -> DictConfig
+
+ +
+ +

The omegaconf.DictConfig that is created by Hydra from the command-line arguments.

+

Any interpolations in the configs will not have been resolved at this point.

+ +
+ +
+ +
+ + +

+ experiment_config + + +#

+
experiment_config(
+    experiment_dictconfig: DictConfig,
+) -> Config
+
+ +
+ +

The experiment configuration, with all interpolations resolved.

+ +
+ +
+ +
+ + +

+ datamodule + + +#

+
datamodule(
+    experiment_dictconfig: DictConfig,
+) -> LightningDataModule | None
+
+ +
+ +

Fixture that creates the datamodule for the given config.

+ +
+ +
+ +
+ + +

+ algorithm + + +#

+
algorithm(
+    experiment_config: Config,
+    datamodule: LightningDataModule | None,
+    trainer: Trainer | JaxTrainer,
+    seed: int,
+    device: device,
+)
+
+ +
+ +

Fixture that creates the "algorithm" (a +LightningModule).

+ +
+ +
+ +
+ + +

+ seed + + +#

+
seed(
+    request: FixtureRequest, make_torch_deterministic: None
+)
+
+ +
+ +

Fixture that seeds everything for reproducibility and yields the random seed used.

+ +
+ +
+ +
+ + +

+ accelerator + + +#

+
accelerator(request: FixtureRequest)
+
+ +
+ +

Returns the accelerator to use during unit tests.

+

By default, returns "cuda" if CUDA is available. If the tests are run with -vvv, they are also run on the CPU.

+ +
+ +
+ +
+ + +

+ devices + + +#

+
devices(
+    accelerator: str, request: FixtureRequest
+) -> Generator[
+    list[int] | int | Literal["auto"], None, None
+]
+
+ +
+ +

Fixture that creates the 'devices' argument for the Trainer config.

+

Splits up the GPUs between pytest-xdist workers when using distributed testing. +This isn't currently used in the CI.

+

TODO: Design dilemma here: should we parametrize the devices command-line override and force experiments to run with this value during tests, or should we change things based on this value in the config?

+ +
+ +
+ +
+ + +

+ command_line_overrides + + +#

+
command_line_overrides(
+    request: FixtureRequest,
+) -> tuple[str, ...]
+
+ +
+ +

Fixture that makes it possible to specify command-line overrides to use in a given test.

+

Tests that require running an experiment should use the experiment_config fixture below.

+

Multiple tests using the same overrides will share the same experiment.
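Assuming the same indirect-parametrization mechanism as above, a sketch of a test that sets its own overrides could look like this (the override values are only examples):

import pytest


@pytest.mark.parametrize(
    "command_line_overrides",
    [("algorithm=image_classifier", "datamodule=mnist", "seed=123")],
    indirect=True,
)
def test_with_overrides(experiment_config):
    # The experiment config is built from the overrides given above.
    assert experiment_config.seed == 123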

+ +
+ +
+ +
+ + +

+ make_torch_deterministic + + +#

+
make_torch_deterministic()
+
+ +
+ +

Set torch to deterministic mode for unit tests that use the tensor_regression fixture.
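A minimal sketch of what such a fixture might do (the actual fixture in conftest.py may differ):

import pytest
import torch


@pytest.fixture
def make_torch_deterministic():
    # Make PyTorch error out on non-deterministic operations, then restore the previous mode.
    was_enabled = torch.are_deterministic_algorithms_enabled()
    torch.use_deterministic_algorithms(True)
    yield
    torch.use_deterministic_algorithms(was_enabled)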

+ +
+ +
+ +
+ + +

+ pytest_runtest_makereport + + +#

+
pytest_runtest_makereport(item: Function, call: CallInfo)
+
+ +
+ +

Used to set up the pytest.mark.incremental mark, as described in the pytest docs.

+

See this page

+ +
+ +
+ +
+ + +

+ pytest_runtest_setup + + +#

+
pytest_runtest_setup(item: Function)
+
+ +
+ +

Used to set up the pytest.mark.incremental mark, as described on this page.

+ +
+ +
+ +
+ + +

+ pytest_generate_tests + + +#

+
pytest_generate_tests(metafunc: Metafunc) -> None
+
+ +
+ +

Allows one to define custom parametrization schemes or extensions.

+

This is used to implement the parametrize_when_used mark, which allows one to parametrize an argument when it is used.

+

See +https://docs.pytest.org/en/7.1.x/how-to/parametrize.html#how-to-parametrize-fixtures-and-test-functions

+ +
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/datamodules/datamodules_test/index.html b/reference/project/datamodules/datamodules_test/index.html new file mode 100644 index 00000000..4cc43391 --- /dev/null +++ b/reference/project/datamodules/datamodules_test/index.html @@ -0,0 +1,2508 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Datamodules test - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Datamodules test

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/datamodules/image_classification/cifar10/index.html b/reference/project/datamodules/image_classification/cifar10/index.html new file mode 100644 index 00000000..8b0ddce2 --- /dev/null +++ b/reference/project/datamodules/image_classification/cifar10/index.html @@ -0,0 +1,2599 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Cifar10 - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Cifar10

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ CIFAR10DataModule + + +#

+ + +
+

+ Bases: ImageClassificationDataModule

+ + +

.. figure:: https://3qeqpr26caki16dnhd19sv6by6v-wpengine.netdna-ssl.com/wp-content/uploads/2019/01/ + Plot-of-a-Subset-of-Images-from-the-CIFAR-10-Dataset.png + :width: 400 + :alt: CIFAR-10

+ + +
+ Specs +
    +
  • 10 classes
  • +
  • Each image is (3 x 32 x 32)
  • +
+

Standard CIFAR10, train, val, test splits and transforms

+

Transforms::

+
transforms = transform_lib.Compose([
+    transform_lib.ToImage(),
+    transform_lib.ToDtype(torch.float32, scale=True),
+    transform_lib.Normalize(
+        mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
+        std=[x / 255.0 for x in [63.0, 62.1, 66.7]]
+    )
+])
+
+

Example::

+
from pl_bolts.datamodules import CIFAR10DataModule
+
+dm = CIFAR10DataModule(PATH)
+model = LitModel()
+
+Trainer().fit(model, datamodule=dm)
+
+

Or you can set your own transforms

+

Example::

+
dm.train_transforms = ...
+dm.test_transforms = ...
+dm.val_transforms  = ...
+
+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/datamodules/image_classification/fashion_mnist/index.html b/reference/project/datamodules/image_classification/fashion_mnist/index.html new file mode 100644 index 00000000..2d0590e2 --- /dev/null +++ b/reference/project/datamodules/image_classification/fashion_mnist/index.html @@ -0,0 +1,2587 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Fashion mnist - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Fashion mnist

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ FashionMNISTDataModule + + +#

+ + +
+

+ Bases: MNISTDataModule

+ + +

.. figure:: https://storage.googleapis.com/kaggle-datasets-images/2243/3791/9384af51de8baa77f6320901f53bd26b/dataset-cover.png + :width: 400 + :alt: Fashion MNIST

+ + +
+ Specs +
    +
  • 10 classes (1 per type)
  • +
  • Each image is (1 x 28 x 28)
  • +
+

Standard FashionMNIST, train, val, test splits and transforms

+

Transforms::

+
mnist_transforms = transform_lib.Compose([
+    transform_lib.ToTensor()
+])
+
+

Example::

+
from pl_bolts.datamodules import FashionMNISTDataModule
+
+dm = FashionMNISTDataModule('.')
+model = LitModel()
+
+Trainer().fit(model, datamodule=dm)
+
+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/datamodules/image_classification/image_classification/index.html b/reference/project/datamodules/image_classification/image_classification/index.html new file mode 100644 index 00000000..afc6b30e --- /dev/null +++ b/reference/project/datamodules/image_classification/image_classification/index.html @@ -0,0 +1,2633 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Image classification - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Image classification

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ ImageClassificationDataModule + + +#

+ + +
+

+ Bases: VisionDataModule[ImageBatchType], ClassificationDataModule[ImageBatchType]

+ + +

Lightning data modules for image classification.

+ + + + + + + + + +
+ + + + + + + +
+ + + +

+ num_classes + + + + instance-attribute + + +#

+
num_classes: int
+
+ +
+ +

Number of classes in the dataset.

+
+ +
+ +
+ + + +

+ dims + + + + instance-attribute + + +#

+
dims: tuple[C, H, W]
+
+ +
+ +

A tuple describing the shape of the data.

+
+ +
+ + + + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/datamodules/image_classification/imagenet/index.html b/reference/project/datamodules/image_classification/imagenet/index.html new file mode 100644 index 00000000..b358d4fb --- /dev/null +++ b/reference/project/datamodules/image_classification/imagenet/index.html @@ -0,0 +1,2988 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Imagenet - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Imagenet

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ ImageNetDataModule + + +#

+ + +
+

+ Bases: ImageClassificationDataModule

+ + +

ImageNet datamodule.

+

Extracted from https://github.com/Lightning-Universe/lightning-bolts/blob/master/src/pl_bolts/datamodules/imagenet_datamodule.py +- Made this a subclass of VisionDataModule

+

Notes:

+
    +
  • train_dataloader uses the train split of imagenet2012 and puts away a portion of it for the validation split.
  • +
  • val_dataloader uses the part of the train split of imagenet2012 that was not used for training via + num_imgs_per_val_class
  • +
  • test_dataloader uses the validation split of imagenet2012 for testing.
      +
    • TODO: need to pass num_imgs_per_class=-1 for test dataset and split="test".
    • +
    +
  • +
+ + + + + + + + + +
+ + + + + + + +
+ + + +

+ name + + + + class-attribute + instance-attribute + + +#

+
name: str | None = 'imagenet'
+
+ +
+ +

Dataset name.

+
+ +
+ +
+ + + +

+ dataset_cls + + + + class-attribute + + +#

+
dataset_cls: type[VisionDataset] = ImageNet
+
+ +
+ +

Dataset class to use.

+
+ +
+ +
+ + + +

+ dims + + + + class-attribute + instance-attribute + + +#

+
dims: tuple[C, H, W] = (
+    C(3),
+    H(image_size),
+    W(image_size),
+)
+
+ +
+ +

A tuple describing the shape of the data.

+
+ +
+ + + +
+ + +

+ __init__ + + +#

+
__init__(
+    data_dir: str | Path = DATA_DIR,
+    *,
+    val_split: int | float = 0.01,
+    num_workers: int = NUM_WORKERS,
+    normalize: bool = False,
+    image_size: int = 224,
+    batch_size: int = 32,
+    seed: int = 42,
+    shuffle: bool = True,
+    pin_memory: bool = True,
+    drop_last: bool = False,
+    train_transforms: Callable | None = None,
+    val_transforms: Callable | None = None,
+    test_transforms: Callable | None = None,
+    **kwargs
+)
+
+ +
+ +

Creates an ImageNet datamodule (doesn't load or prepare the dataset yet).

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
+ data_dir + + str | Path + +
+

path to the imagenet dataset file

+
+
+ DATA_DIR +
+ val_split + + int | float + +
+

save val_split% of the training data of each class for validation.

+
+
+ 0.01 +
+ image_size + + int + +
+

final image size

+
+
+ 224 +
+ num_workers + + int + +
+

how many data workers

+
+
+ NUM_WORKERS +
+ batch_size + + int + +
+

batch_size

+
+
+ 32 +
+ shuffle + + bool + +
+

If true shuffles the data every epoch

+
+
+ True +
+ pin_memory + + bool + +
+

If true, the data loader will copy Tensors into CUDA pinned memory before returning them

+
+
+ True +
+ drop_last + + bool + +
+

If true drops the last incomplete batch

+
+
+ False +
+ +
+ +
+ +
+ + +

+ train_transform + + +#

+
train_transform() -> Module
+
+ +
+ +

The standard imagenet transforms.

+
transforms.Compose([
+    transforms.RandomResizedCrop(self.image_size),
+    transforms.RandomHorizontalFlip(),
+    transforms.ToTensor(),
+    transforms.Normalize(
+        mean=[0.485, 0.456, 0.406],
+        std=[0.229, 0.224, 0.225]
+    ),
+])
+
+ +
+ +
+ +
+ + +

+ val_transform + + +#

+
val_transform() -> Compose
+
+ +
+ +

The standard imagenet transforms for validation.

+

.. code-block:: python

+
transforms.Compose([
+    transforms.Resize(self.image_size + 32),
+    transforms.CenterCrop(self.image_size),
+    transforms.ToTensor(),
+    transforms.Normalize(
+        mean=[0.485, 0.456, 0.406],
+        std=[0.229, 0.224, 0.225]
+    ),
+])
+
+ +
+ +
+ + + +
+ +
+ +
+ + +
+ + +

+ prepare_imagenet + + +#

+
prepare_imagenet(
+    root: Path,
+    *,
+    split: Literal["train", "val"] = "train",
+    network_imagenet_dir: Path
+) -> None
+
+ +
+ +

Custom preparation function for ImageNet, using @obilaniu's tar magic in Python form.

+

The core of this is equivalent to these bash commands:

+
mkdir -p $SLURM_TMPDIR/imagenet/val
+cd       $SLURM_TMPDIR/imagenet/val
+tar  -xf /network/scratch/b/bilaniuo/ILSVRC2012_img_val.tar
+mkdir -p $SLURM_TMPDIR/imagenet/train
+cd       $SLURM_TMPDIR/imagenet/train
+tar  -xf /network/datasets/imagenet/ILSVRC2012_img_train.tar          --to-command='mkdir ${TAR_REALNAME%.tar}; tar -xC ${TAR_REALNAME%.tar}'
+
+ +
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/datamodules/image_classification/inaturalist/index.html b/reference/project/datamodules/image_classification/inaturalist/index.html new file mode 100644 index 00000000..766a3720 --- /dev/null +++ b/reference/project/datamodules/image_classification/inaturalist/index.html @@ -0,0 +1,2693 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Inaturalist - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Inaturalist

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ INaturalistDataModule + + +#

+ + +
+

+ Bases: VisionDataModule

+ + + + + + + + + + +
+ + + + + + + +
+ + + +

+ name + + + + class-attribute + instance-attribute + + +#

+
name: str | None = 'inaturalist'
+
+ +
+ +

Dataset name.

+
+ +
+ +
+ + + +

+ dataset_cls + + + + class-attribute + + +#

+
dataset_cls: type[VisionDataset] = INaturalist
+
+ +
+ +

Dataset class to use.

+
+ +
+ +
+ + + +

+ dims + + + + class-attribute + instance-attribute + + +#

+
dims: tuple[C, H, W] = (C(3), H(224), W(224))
+
+ +
+ +

A tuple describing the shape of the data.

+
+ +
+ + + +
+ + +

+ default_transforms + + +#

+
default_transforms() -> Callable
+
+ +
+ +

Default transform for the dataset.

+ +
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/datamodules/image_classification/inaturalist_test/index.html b/reference/project/datamodules/image_classification/inaturalist_test/index.html new file mode 100644 index 00000000..c04a1590 --- /dev/null +++ b/reference/project/datamodules/image_classification/inaturalist_test/index.html @@ -0,0 +1,2510 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Inaturalist test - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Inaturalist test

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/datamodules/image_classification/index.html b/reference/project/datamodules/image_classification/index.html new file mode 100644 index 00000000..24200a3a --- /dev/null +++ b/reference/project/datamodules/image_classification/index.html @@ -0,0 +1,2555 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Image classification - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Image classification

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ ImageClassificationDataModule + + +#

+ + +
+

+ Bases: VisionDataModule[ImageBatchType], ClassificationDataModule[ImageBatchType]

+ + +

Lightning data modules for image classification.

+ + + + + + + + + +
+ + + + + + + +
+ + + +

+ num_classes + + + + instance-attribute + + +#

+
num_classes: int
+
+ +
+ +

Number of classes in the dataset.

+
+ +
+ +
+ + + +

+ dims + + + + instance-attribute + + +#

+
dims: tuple[C, H, W]
+
+ +
+ +

A tuple describing the shape of the data.

+
+ +
+ + + + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/datamodules/image_classification/mnist/index.html b/reference/project/datamodules/image_classification/mnist/index.html new file mode 100644 index 00000000..cbcd2570 --- /dev/null +++ b/reference/project/datamodules/image_classification/mnist/index.html @@ -0,0 +1,2792 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Mnist - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Mnist

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ MNISTDataModule + + +#

+ + +
+

+ Bases: ImageClassificationDataModule

+ + +

.. figure:: https://miro.medium.com/max/744/1*AO2rIhzRYzFVQlFLx9DM9A.png + :width: 400 + :alt: MNIST

+ + +
+ Specs +
    +
  • 10 classes (1 per digit)
  • +
  • Each image is (1 x 28 x 28)
  • +
+

Standard MNIST, train, val, test splits and transforms

+

Transforms::

+
mnist_transforms = transform_lib.Compose([
+    transform_lib.ToTensor()
+])
+
+

Example::

+
from pl_bolts.datamodules import MNISTDataModule
+
+dm = MNISTDataModule('.')
+model = LitModel()
+
+Trainer().fit(model, datamodule=dm)
+
+ + + + + + + + + +
+ + + + + + + + + +
+ + +

+ __init__ + + +#

+
__init__(
+    data_dir: str | Path = DATA_DIR,
+    val_split: int | float = 0.2,
+    num_workers: int = 0,
+    normalize: bool = False,
+    batch_size: int = 32,
+    seed: int = 42,
+    shuffle: bool = True,
+    pin_memory: bool = True,
+    drop_last: bool = False,
+    *args: Any,
+    **kwargs: Any
+) -> None
+
+ +
+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
+ data_dir + + str | Path + +
+

Where to save/load the data

+
+
+ DATA_DIR +
+ val_split + + int | float + +
+

Percent (float) or number (int) of samples to use for the validation split

+
+
+ 0.2 +
+ num_workers + + int + +
+

How many workers to use for loading data

+
+
+ 0 +
+ normalize + + bool + +
+

If true applies image normalize

+
+
+ False +
+ batch_size + + int + +
+

How many samples per batch to load

+
+
+ 32 +
+ seed + + int + +
+

Random seed to be used for train/val/test splits

+
+
+ 42 +
+ shuffle + + bool + +
+

If true shuffles the train data every epoch

+
+
+ True +
+ pin_memory + + bool + +
+

If true, the data loader will copy Tensors into CUDA pinned memory before + returning them

+
+
+ True +
+ drop_last + + bool + +
+

If true drops the last incomplete batch

+
+
+ False +
+ +
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/datamodules/index.html b/reference/project/datamodules/index.html new file mode 100644 index 00000000..55e5b474 --- /dev/null +++ b/reference/project/datamodules/index.html @@ -0,0 +1,3980 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Datamodules - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Datamodules

+ +
+ + + + +
+ +

Datamodules (datasets + preprocessing + dataloading)

+

See the lightning.LightningDataModule class for more information.
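For reference, a bare-bones LightningDataModule has roughly the following shape (a generic sketch of the Lightning API, not one of the datamodules documented below; the names are hypothetical):

import lightning
from torch.utils.data import DataLoader, Dataset


class MyDataModule(lightning.LightningDataModule):
    # Hypothetical minimal datamodule.
    def __init__(self, data_dir: str = "data", batch_size: int = 32):
        super().__init__()
        self.data_dir = data_dir
        self.batch_size = batch_size
        self.train_dataset: Dataset | None = None

    def prepare_data(self) -> None:
        # Download or write the data to `self.data_dir` (runs once, on a single process).
        ...

    def setup(self, stage: str) -> None:
        # Create the dataset(s) for the given stage ("fit", "validate", "test" or "predict").
        ...

    def train_dataloader(self) -> DataLoader:
        return DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True)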

+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ ImageClassificationDataModule + + +#

+ + +
+

+ Bases: VisionDataModule[ImageBatchType], ClassificationDataModule[ImageBatchType]

+ + +

Lightning data modules for image classification.

+ + + + + + + + + +
+ + + + + + + +
+ + + +

+ num_classes + + + + instance-attribute + + +#

+
num_classes: int
+
+ +
+ +

Number of classes in the dataset.

+
+ +
+ +
+ + + +

+ dims + + + + instance-attribute + + +#

+
dims: tuple[C, H, W]
+
+ +
+ +

A tuple describing the shape of the data.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ CIFAR10DataModule + + +#

+ + +
+

+ Bases: ImageClassificationDataModule

+ + +

.. figure:: https://3qeqpr26caki16dnhd19sv6by6v-wpengine.netdna-ssl.com/wp-content/uploads/2019/01/ + Plot-of-a-Subset-of-Images-from-the-CIFAR-10-Dataset.png + :width: 400 + :alt: CIFAR-10

+ + +
+ Specs +
    +
  • 10 classes
  • +
  • Each image is (3 x 32 x 32)
  • +
+

Standard CIFAR10, train, val, test splits and transforms

+

Transforms::

+
transforms = transform_lib.Compose([
+    transform_lib.ToImage(),
+    transform_lib.ToDtype(torch.float32, scale=True),
+    transform_lib.Normalize(
+        mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
+        std=[x / 255.0 for x in [63.0, 62.1, 66.7]]
+    )
+])
+
+

Example::

+
from pl_bolts.datamodules import CIFAR10DataModule
+
+dm = CIFAR10DataModule(PATH)
+model = LitModel()
+
+Trainer().fit(model, datamodule=dm)
+
+

Or you can set your own transforms

+

Example::

+
dm.train_transforms = ...
+dm.test_transforms = ...
+dm.val_transforms  = ...
+
+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ FashionMNISTDataModule + + +#

+ + +
+

+ Bases: MNISTDataModule

+ + +

.. figure:: https://storage.googleapis.com/kaggle-datasets-images/2243/3791/9384af51de8baa77f6320901f53bd26b/dataset-cover.png + :width: 400 + :alt: Fashion MNIST

+ + +
+ Specs +
    +
  • 10 classes (1 per type)
  • +
  • Each image is (1 x 28 x 28)
  • +
+

Standard FashionMNIST, train, val, test splits and transforms

+

Transforms::

+
mnist_transforms = transform_lib.Compose([
+    transform_lib.ToTensor()
+])
+
+

Example::

+
from pl_bolts.datamodules import FashionMNISTDataModule
+
+dm = FashionMNISTDataModule('.')
+model = LitModel()
+
+Trainer().fit(model, datamodule=dm)
+
+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ ImageNetDataModule + + +#

+ + +
+

+ Bases: ImageClassificationDataModule

+ + +

ImageNet datamodule.

+

Extracted from https://github.com/Lightning-Universe/lightning-bolts/blob/master/src/pl_bolts/datamodules/imagenet_datamodule.py +- Made this a subclass of VisionDataModule

+

Notes:

+
    +
  • train_dataloader uses the train split of imagenet2012 and puts away a portion of it for the validation split.
  • +
  • val_dataloader uses the part of the train split of imagenet2012 that was not used for training via + num_imgs_per_val_class
  • +
  • test_dataloader uses the validation split of imagenet2012 for testing.
      +
    • TODO: need to pass num_imgs_per_class=-1 for test dataset and split="test".
    • +
    +
  • +
+ + + + + + + + + +
+ + + + + + + +
+ + + +

+ name + + + + class-attribute + instance-attribute + + +#

+
name: str | None = 'imagenet'
+
+ +
+ +

Dataset name.

+
+ +
+ +
+ + + +

+ dataset_cls + + + + class-attribute + + +#

+
dataset_cls: type[VisionDataset] = ImageNet
+
+ +
+ +

Dataset class to use.

+
+ +
+ +
+ + + +

+ dims + + + + class-attribute + instance-attribute + + +#

+
dims: tuple[C, H, W] = (
+    C(3),
+    H(image_size),
+    W(image_size),
+)
+
+ +
+ +

A tuple describing the shape of the data.

+
+ +
+ + + +
+ + +

+ __init__ + + +#

+
__init__(
+    data_dir: str | Path = DATA_DIR,
+    *,
+    val_split: int | float = 0.01,
+    num_workers: int = NUM_WORKERS,
+    normalize: bool = False,
+    image_size: int = 224,
+    batch_size: int = 32,
+    seed: int = 42,
+    shuffle: bool = True,
+    pin_memory: bool = True,
+    drop_last: bool = False,
+    train_transforms: Callable | None = None,
+    val_transforms: Callable | None = None,
+    test_transforms: Callable | None = None,
+    **kwargs
+)
+
+ +
+ +

Creates an ImageNet datamodule (doesn't load or prepare the dataset yet).

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
+ data_dir + + str | Path + +
+

path to the imagenet dataset file

+
+
+ DATA_DIR +
+ val_split + + int | float + +
+

save val_split% of the training data of each class for validation.

+
+
+ 0.01 +
+ image_size + + int + +
+

final image size

+
+
+ 224 +
+ num_workers + + int + +
+

how many data workers

+
+
+ NUM_WORKERS +
+ batch_size + + int + +
+

batch_size

+
+
+ 32 +
+ shuffle + + bool + +
+

If true shuffles the data every epoch

+
+
+ True +
+ pin_memory + + bool + +
+

If true, the data loader will copy Tensors into CUDA pinned memory before returning them

+
+
+ True +
+ drop_last + + bool + +
+

If true drops the last incomplete batch

+
+
+ False +
+ +
+ +
+ +
+ + +

+ train_transform + + +#

+
train_transform() -> Module
+
+ +
+ +

The standard imagenet transforms.

+
transforms.Compose([
+    transforms.RandomResizedCrop(self.image_size),
+    transforms.RandomHorizontalFlip(),
+    transforms.ToTensor(),
+    transforms.Normalize(
+        mean=[0.485, 0.456, 0.406],
+        std=[0.229, 0.224, 0.225]
+    ),
+])
+
+ +
+ +
+ +
+ + +

+ val_transform + + +#

+
val_transform() -> Compose
+
+ +
+ +

The standard imagenet transforms for validation.

+

.. code-block:: python

+
transforms.Compose([
+    transforms.Resize(self.image_size + 32),
+    transforms.CenterCrop(self.image_size),
+    transforms.ToTensor(),
+    transforms.Normalize(
+        mean=[0.485, 0.456, 0.406],
+        std=[0.229, 0.224, 0.225]
+    ),
+])
+
+ +
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ INaturalistDataModule + + +#

+ + +
+

+ Bases: VisionDataModule

+ + + + + + + + + + +
+ + + + + + + +
+ + + +

+ name + + + + class-attribute + instance-attribute + + +#

+
name: str | None = 'inaturalist'
+
+ +
+ +

Dataset name.

+
+ +
+ +
+ + + +

+ dataset_cls + + + + class-attribute + + +#

+
dataset_cls: type[VisionDataset] = INaturalist
+
+ +
+ +

Dataset class to use.

+
+ +
+ +
+ + + +

+ dims + + + + class-attribute + instance-attribute + + +#

+
dims: tuple[C, H, W] = (C(3), H(224), W(224))
+
+ +
+ +

A tuple describing the shape of the data.

+
+ +
+ + + +
+ + +

+ default_transforms + + +#

+
default_transforms() -> Callable
+
+ +
+ +

Default transform for the dataset.

+ +
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ MNISTDataModule + + +#

+ + +
+

+ Bases: ImageClassificationDataModule

+ + +

.. figure:: https://miro.medium.com/max/744/1*AO2rIhzRYzFVQlFLx9DM9A.png + :width: 400 + :alt: MNIST

+ + +
+ Specs +
    +
  • 10 classes (1 per digit)
  • +
  • Each image is (1 x 28 x 28)
  • +
+

Standard MNIST, train, val, test splits and transforms

+

Transforms::

+
mnist_transforms = transform_lib.Compose([
+    transform_lib.ToTensor()
+])
+
+

Example::

+
from pl_bolts.datamodules import MNISTDataModule
+
+dm = MNISTDataModule('.')
+model = LitModel()
+
+Trainer().fit(model, datamodule=dm)
+
+ + + + + + + + + +
+ + + + + + + + + +
+ + +

+ __init__ + + +#

+
__init__(
+    data_dir: str | Path = DATA_DIR,
+    val_split: int | float = 0.2,
+    num_workers: int = 0,
+    normalize: bool = False,
+    batch_size: int = 32,
+    seed: int = 42,
+    shuffle: bool = True,
+    pin_memory: bool = True,
+    drop_last: bool = False,
+    *args: Any,
+    **kwargs: Any
+) -> None
+
+ +
+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
+ data_dir + + str | Path + +
+

Where to save/load the data

+
+
+ DATA_DIR +
+ val_split + + int | float + +
+

Percent (float) or number (int) of samples to use for the validation split

+
+
+ 0.2 +
+ num_workers + + int + +
+

How many workers to use for loading data

+
+
+ 0 +
+ normalize + + bool + +
+

If true applies image normalize

+
+
+ False +
+ batch_size + + int + +
+

How many samples per batch to load

+
+
+ 32 +
+ seed + + int + +
+

Random seed to be used for train/val/test splits

+
+
+ 42 +
+ shuffle + + bool + +
+

If true shuffles the train data every epoch

+
+
+ True +
+ pin_memory + + bool + +
+

If true, the data loader will copy Tensors into CUDA pinned memory before + returning them

+
+
+ True +
+ drop_last + + bool + +
+

If true drops the last incomplete batch

+
+
+ False +
+ +
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ TextClassificationDataModule + + +#

+ + +
+

+ Bases: LightningDataModule

+ + +

Lightning data module for HF text classification datasets.

+

This is based on this tutorial: +https://lightning.ai/docs/pytorch/stable/notebooks/lightning_examples/text-transformers.html

+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ VisionDataModule + + +#

+ + +
+

+ Bases: LightningDataModule, DataModule[BatchType_co]

+ + +

A LightningDataModule for image datasets.

+

(Taken from pl_bolts which is not very well maintained.)

+ + + + + + + + + +
+ + + + + + + +
+ + + +

+ name + + + + class-attribute + instance-attribute + + +#

+
name: str | None = ''
+
+ +
+ +

Dataset name.

+
+ +
+ +
+ + + +

+ dataset_cls + + + + class-attribute + + +#

+
dataset_cls: type[VisionDataset]
+
+ +
+ +

Dataset class to use.

+
+ +
+ +
+ + + +

+ dims + + + + instance-attribute + + +#

+
dims: tuple[C, H, W]
+
+ +
+ +

A tuple describing the shape of the data.

+
+ +
+ + + +
+ + +

+ __init__ + + +#

+
__init__(
+    data_dir: str | Path = DATA_DIR,
+    val_split: int | float = 0.2,
+    num_workers: int = NUM_WORKERS,
+    normalize: bool = False,
+    batch_size: int = 32,
+    seed: int = 42,
+    shuffle: bool = True,
+    pin_memory: bool = True,
+    drop_last: bool = False,
+    train_transforms: Callable | None = None,
+    val_transforms: Callable | None = None,
+    test_transforms: Callable | None = None,
+    **kwargs
+) -> None
+
+ +
+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
+ data_dir + + str | Path + +
+

Where to save/load the data

+
+
+ DATA_DIR +
+ val_split + + int | float + +
+

Percent (float) or number (int) of samples to use for the validation split

+
+
+ 0.2 +
+ num_workers + + int + +
+

How many workers to use for loading data

+
+
+ NUM_WORKERS +
+ normalize + + bool + +
+

If true applies image normalize

+
+
+ False +
+ batch_size + + int + +
+

How many samples per batch to load

+
+
+ 32 +
+ seed + + int + +
+

Random seed to be used for train/val/test splits

+
+
+ 42 +
+ shuffle + + bool + +
+

If true shuffles the train data every epoch

+
+
+ True +
+ pin_memory + + bool + +
+

If true, the data loader will copy Tensors into CUDA pinned memory before + returning them

+
+
+ True +
+ drop_last + + bool + +
+

If true drops the last incomplete batch

+
+
+ False +
+ train_transforms + + Callable | None + +
+

transformations you can apply to train dataset

+
+
+ None +
+ val_transforms + + Callable | None + +
+

transformations you can apply to validation dataset

+
+
+ None +
+ test_transforms + + Callable | None + +
+

transformations you can apply to test dataset

+
+
+ None +
+ +
+ +
+ +
+ + +

+ prepare_data + + +#

+
prepare_data() -> None
+
+ +
+ +

Saves files to data_dir.

+ +
+ +
+ +
+ + +

+ default_transforms + + + + abstractmethod + + +#

+
default_transforms() -> Callable
+
+ +
+ +

Default transform for the dataset.

+ +
+ +
+ +
+ + +

+ train_dataloader + + +#

+
train_dataloader(
+    _dataloader_fn: Callable[
+        Concatenate[Dataset, P], DataLoader
+    ] = DataLoader,
+    *args: args,
+    **kwargs: kwargs
+) -> DataLoader
+
+ +
+ +

The train dataloader.

+ +
+ +
+ +
+ + +

+ val_dataloader + + +#

+
val_dataloader(
+    _dataloader_fn: Callable[
+        Concatenate[Dataset, P], DataLoader
+    ] = DataLoader,
+    *args: args,
+    **kwargs: kwargs
+) -> DataLoader
+
+ +
+ +

The val dataloader.

+ +
+ +
+ +
+ + +

+ test_dataloader + + +#

+
test_dataloader(
+    _dataloader_fn: Callable[
+        Concatenate[Dataset, P], DataLoader
+    ] = DataLoader,
+    *args: args,
+    **kwargs: kwargs
+) -> DataLoader
+
+ +
+ +

The test dataloader.

+ +
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/datamodules/text/index.html b/reference/project/datamodules/text/index.html new file mode 100644 index 00000000..2bd6f532 --- /dev/null +++ b/reference/project/datamodules/text/index.html @@ -0,0 +1,2511 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Text - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+ +
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/datamodules/text/text_classification/index.html b/reference/project/datamodules/text/text_classification/index.html new file mode 100644 index 00000000..eca75e04 --- /dev/null +++ b/reference/project/datamodules/text/text_classification/index.html @@ -0,0 +1,2569 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Text classification - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Text classification

+ +
+ + + + +
+ +

Example algorithm that can train a huggingface model.

+

Also check out this link for a more detailed example script:

+

https://github.com/lebrice/mila-docs/blob/llm_training/docs/examples/distributed/LLM_training/main.py

+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ TextClassificationDataModule + + +#

+ + +
+

+ Bases: LightningDataModule

+ + +

Lightning data module for HF text classification datasets.

+

This is based on this tutorial: +https://lightning.ai/docs/pytorch/stable/notebooks/lightning_examples/text-transformers.html

+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/datamodules/text/text_classification_test/index.html b/reference/project/datamodules/text/text_classification_test/index.html new file mode 100644 index 00000000..f60f43bc --- /dev/null +++ b/reference/project/datamodules/text/text_classification_test/index.html @@ -0,0 +1,2568 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Text classification test - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Text classification test

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + + +
+ + +

+ datamodule + + +#

+
datamodule(request: FixtureRequest) -> LightningDataModule
+
+ +
+ +

Fixture that creates the datamodule for the given config.

+ +
+ +
+ +
+ + +

+ test_dataset_location + + +#

+
test_dataset_location(
+    prepared_datamodule: TextClassificationDataModule,
+)
+
+ +
+ +

Test that the dataset is downloaded to the correct location.

+ +
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/datamodules/vision/index.html b/reference/project/datamodules/vision/index.html new file mode 100644 index 00000000..91289e9a --- /dev/null +++ b/reference/project/datamodules/vision/index.html @@ -0,0 +1,3076 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Vision - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Vision

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ VisionDataModule + + +#

+ + +
+

+ Bases: LightningDataModule, DataModule[BatchType_co]

+ + +

A LightningDataModule for image datasets.

+

(Taken from pl_bolts which is not very well maintained.)

+ + + + + + + + + +
+ + + + + + + +
+ + + +

+ name + + + + class-attribute + instance-attribute + + +#

+
name: str | None = ''
+
+ +
+ +

Dataset name.

+
+ +
+ +
+ + + +

+ dataset_cls + + + + class-attribute + + +#

+
dataset_cls: type[VisionDataset]
+
+ +
+ +

Dataset class to use.

+
+ +
+ +
+ + + +

+ dims + + + + instance-attribute + + +#

+
dims: tuple[C, H, W]
+
+ +
+ +

A tuple describing the shape of the data.

+
+ +
+ + + +
+ + +

+ __init__ + + +#

+
__init__(
+    data_dir: str | Path = DATA_DIR,
+    val_split: int | float = 0.2,
+    num_workers: int = NUM_WORKERS,
+    normalize: bool = False,
+    batch_size: int = 32,
+    seed: int = 42,
+    shuffle: bool = True,
+    pin_memory: bool = True,
+    drop_last: bool = False,
+    train_transforms: Callable | None = None,
+    val_transforms: Callable | None = None,
+    test_transforms: Callable | None = None,
+    **kwargs
+) -> None
+
+ +
+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
+ data_dir + + str | Path + +
+

Where to save/load the data

+
+
+ DATA_DIR +
+ val_split + + int | float + +
+

Percent (float) or number (int) of samples to use for the validation split

+
+
+ 0.2 +
+ num_workers + + int + +
+

How many workers to use for loading data

+
+
+ NUM_WORKERS +
+ normalize + + bool + +
+

If true applies image normalize

+
+
+ False +
+ batch_size + + int + +
+

How many samples per batch to load

+
+
+ 32 +
+ seed + + int + +
+

Random seed to be used for train/val/test splits

+
+
+ 42 +
+ shuffle + + bool + +
+

If true shuffles the train data every epoch

+
+
+ True +
+ pin_memory + + bool + +
+

If true, the data loader will copy Tensors into CUDA pinned memory before + returning them

+
+
+ True +
+ drop_last + + bool + +
+

If true drops the last incomplete batch

+
+
+ False +
+ train_transforms + + Callable | None + +
+

transformations you can apply to train dataset

+
+
+ None +
+ val_transforms + + Callable | None + +
+

transformations you can apply to validation dataset

+
+
+ None +
+ test_transforms + + Callable | None + +
+

transformations you can apply to test dataset

+
+
+ None +
+ +
+ +
+ +
+ + +

+ prepare_data + + +#

+
prepare_data() -> None
+
+ +
+ +

Saves files to data_dir.

+ +
+ +
+ +
+ + +

+ default_transforms + + + + abstractmethod + + +#

+
default_transforms() -> Callable
+
+ +
+ +

Default transform for the dataset.

+ +
+ +
+ +
+ + +

+ train_dataloader + + +#

+
train_dataloader(
+    _dataloader_fn: Callable[
+        Concatenate[Dataset, P], DataLoader
+    ] = DataLoader,
+    *args: args,
+    **kwargs: kwargs
+) -> DataLoader
+
+ +
+ +

The train dataloader.

+ +
+ +
+ +
+ + +

+ val_dataloader + + +#

+
val_dataloader(
+    _dataloader_fn: Callable[
+        Concatenate[Dataset, P], DataLoader
+    ] = DataLoader,
+    *args: args,
+    **kwargs: kwargs
+) -> DataLoader
+
+ +
+ +

The val dataloader.

+ +
+ +
+ +
+ + +

+ test_dataloader + + +#

+
test_dataloader(
+    _dataloader_fn: Callable[
+        Concatenate[Dataset, P], DataLoader
+    ] = DataLoader,
+    *args: args,
+    **kwargs: kwargs
+) -> DataLoader
+
+ +
+ +

The test dataloader.

+ +
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/experiment/index.html b/reference/project/experiment/index.html new file mode 100644 index 00000000..e598abfd --- /dev/null +++ b/reference/project/experiment/index.html @@ -0,0 +1,2583 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Experiment - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Experiment

+ +
+ + + + +
+ +

Module containing the functions which create experiment components from Hydra configs.

+

This is essentially just calling hydra.utils.instantiate +on the +datamodule, network, trainer, and algorithm configs in a certain order.

+

This also adds the instance_attr custom resolver, which allows you to retrieve an attribute of +an instantiated object instead of a config.
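For illustration, this is roughly what such an interpolation could look like. The exact resolver syntax shown here is an assumption, and in practice the interpolation would normally live in a YAML config file rather than be built in Python:

from omegaconf import OmegaConf

# Assumed syntax: "${instance_attr:<object>.<attribute>}". Here, `num_classes` would be read
# from the *instantiated* datamodule rather than from its config.
config = OmegaConf.create(
    {"algorithm": {"num_classes": "${instance_attr:datamodule.num_classes}"}}
)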

+ + + + + + + + +
+ + + + + + + + + +
+ + +

+ instantiate_datamodule + + +#

+
instantiate_datamodule(
+    datamodule_config: (
+        Builds[type[LightningDataModule]]
+        | LightningDataModule
+        | None
+    ),
+) -> LightningDataModule | None
+
+ +
+ +

Instantiate the datamodule from the configuration dict.

+

Any interpolations in the config will have already been resolved by the time we get here.

+ +
+ +
+ +
+ + +

+ instantiate_algorithm + + +#

+
instantiate_algorithm(
+    algorithm_config: Config,
+    datamodule: LightningDataModule | None,
+) -> LightningModule | JaxModule
+
+ +
+ +

Function used to instantiate the algorithm.

+

It is suggested that your algorithm (LightningModule) take in the datamodule and network +as arguments, to make it easier to swap out different networks and datamodules during +experiments.

+

The instantiated datamodule and network will be passed to the algorithm's constructor.

+ +
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/index.html b/reference/project/index.html new file mode 100644 index 00000000..5cde2499 --- /dev/null +++ b/reference/project/index.html @@ -0,0 +1,2463 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Project - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Project

+ +
+ + + + +
+ +

Root module for this research project.

+ + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/main/index.html b/reference/project/main/index.html new file mode 100644 index 00000000..b815c4a4 --- /dev/null +++ b/reference/project/main/index.html @@ -0,0 +1,2654 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Main - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Main

+ +
+ + + + +
+ +

Training script using Hydra.

+

This does the following:
1. Parses the config using Hydra;
2. Instantiates the components (trainer / algorithm), optionally the datamodule and network;
3. Trains the model;
4. Optionally runs an evaluation loop.
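Schematically, the entry point looks like the sketch below (the config path and config name are assumptions, not necessarily the template's actual values; see main below for the real behaviour):

import hydra
from omegaconf import DictConfig


@hydra.main(config_path="configs", config_name="config", version_base=None)
def main(dict_config: DictConfig) -> dict:
    # 1. Instantiate the components (trainer, algorithm, optionally datamodule and network).
    # 2. Train the model.
    # 3. Optionally evaluate it and return the metrics.
    ...


if __name__ == "__main__":
    main()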

+ + + + + + + + +
+ + + + + + + + + +
+ + +

+ main + + +#

+
main(dict_config: DictConfig) -> dict
+
+ +
+ +

Main entry point for training a model.

+

This does roughly the same thing as +https://github.com/ashleve/lightning-hydra-template/blob/main/src/train.py

+
  1. Instantiates the experiment components from the Hydra configuration:
     • trainer
     • algorithm
     • datamodule (optional)
  2. Calls train to train the algorithm
  3. Calls evaluation to evaluate the model
  4. Returns the evaluation metrics.
+ +
+ +
+ +
+ + +

+ instantiate_values + + +#

+
instantiate_values(
+    config_dict: DictConfig | None,
+) -> list[Any] | None
+
+ +
+ +

Instantiates and returns the list of objects built from the values of this dict of configs.

+

This is used for the config of the trainer/logger and trainer/callbacks fields, where +we can combine multiple config groups by adding entries in a dict.

+

For example, using trainer/logger=wandb and trainer/logger=tensorboard would result in a +dict with wandb and tensorboard as keys, and the corresponding config groups as values.

+

This would then return a list with the instantiated WandbLogger and TensorBoardLogger objects.
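A minimal sketch of what this amounts to (not the exact implementation):

from typing import Any

import hydra
from omegaconf import DictConfig


def instantiate_values(config_dict: DictConfig | None) -> list[Any] | None:
    # Instantiate each value of the dict (e.g. each logger or callback config)
    # and return the resulting objects as a list.
    if not config_dict:
        return None
    return [hydra.utils.instantiate(value) for value in config_dict.values()]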

+ +
+ +
+ +
+ + +

+ evaluate_lightningmodule + + +#

+
evaluate_lightningmodule(
+    algorithm: LightningModule,
+    trainer: Trainer,
+    datamodule: LightningDataModule | None,
+) -> tuple[MetricName, float | None, dict]
+
+ +
+ +

Evaluates the algorithm and returns the metrics.

+

By default, if validation is to be performed, returns the validation error. Returns the +training error when trainer.overfit_batches != 0 (e.g. when debugging or testing). Otherwise, +if trainer.limit_val_batches == 0, returns the test error.

+ +
+ +
+ +
+ + +

+ get_error_from_metrics + + +#

+
get_error_from_metrics(
+    metrics: _MetricsT,
+) -> tuple[str, float, dict]
+
+ +
+ +

Returns the main metric name, its value, and the full metrics dictionary.

+ +
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/main_test/index.html b/reference/project/main_test/index.html new file mode 100644 index 00000000..5bf1a63e --- /dev/null +++ b/reference/project/main_test/index.html @@ -0,0 +1,2622 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Main test - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Main test

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + + +
+ + +

+ test_jax_can_use_the_GPU + + +#

+
test_jax_can_use_the_GPU()
+
+ +
+ +

Test that Jax can use the GPU if we have one.

+ +
+ +
+ +
+ + +

+ test_torch_can_use_the_GPU + + +#

+
test_torch_can_use_the_GPU()
+
+ +
+ +

Test that torch can use the GPU if we have one.

+ +
+ +
+ +
+ + +

+ test_setting_just_algorithm_isnt_enough + + +#

+
test_setting_just_algorithm_isnt_enough(
+    experiment_dictconfig: DictConfig,
+) -> None
+
+ +
+ +

Test to check that the datamodule is required (even when just the example algorithm is set).

+

TODO: We could probably move the datamodule config under algorithm/datamodule. Maybe that +would be better?

+ +
+ +
+ +
+ + +

+ test_run_auto_schema_via_cli_without_errors + + +#

+
test_run_auto_schema_via_cli_without_errors()
+
+ +
+ +

Checks that the command completes without errors.

+ +
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/networks/fcnet/index.html b/reference/project/networks/fcnet/index.html new file mode 100644 index 00000000..63f45738 --- /dev/null +++ b/reference/project/networks/fcnet/index.html @@ -0,0 +1,2645 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Fcnet - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Fcnet

+ +
+ + + + +
+ +

An example of a simple fully connected network.

+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ FcNet + + +#

+ + +
+

+ Bases: Sequential

+ + + + + + + + + + +
+ + + + + + + + +
+ + + +

+ HParams + + +#

+ + +
+ + +

Dataclass containing the network hyper-parameters.

+

This is an example of how Pydantic can be used to validate configs and command-line +arguments.

+ + + + + + + + + +
+ + + + + + + +
+ + + +

+ dropout_rate + + + + class-attribute + instance-attribute + + +#

+
dropout_rate: float = 0.5
+
+ +
+ +

Dropout rate.

+

Set to 0 to disable dropout.

+
+ +
+ + + + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/networks/index.html b/reference/project/networks/index.html new file mode 100644 index 00000000..b88a7f39 --- /dev/null +++ b/reference/project/networks/index.html @@ -0,0 +1,2576 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Networks - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Networks

+ +
+ + + + +
+ +

Network definitions.

+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ FcNet + + +#

+ + +
+

+ Bases: Sequential

+ + + + + + + + + + +
+ + + + + + + + +
+ + + +

+ HParams + + +#

+ + +
+ + +

Dataclass containing the network hyper-parameters.

+

This is an example of how Pydantic can be used to validate configs and command-line +arguments.

+ + + + + + + + + +
+ + + + + + + +
+ + + +

+ dropout_rate + + + + class-attribute + instance-attribute + + +#

+
dropout_rate: float = 0.5
+
+ +
+ +

Dropout rate.

+

Set to 0 to disable dropout.

+
+ +
+ + + + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/trainers/index.html b/reference/project/trainers/index.html new file mode 100644 index 00000000..e6b9f7f4 --- /dev/null +++ b/reference/project/trainers/index.html @@ -0,0 +1,2610 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Trainers - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Trainers

+ +
+ + + + +
+ +

Trainers: actually run the training loop.

+

You can define custom trainers here.

+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ JaxTrainer + + +#

+ + +
+

+ Bases: PyTreeNode

+ + +

A simplified version of the lightning.Trainer with a fully jitted training loop.

+

Assumptions:#

+
    +
  • The algo object must match the JaxModule protocol (in other words, it should implement its + methods).
  • +
+

Training loop#

+

This is the training loop, which is fully jitted:

+
ts = algo.init_train_state(rng)
+
+setup("fit")
+on_fit_start()
+on_train_start()
+
+eval_metrics = []
+for epoch in range(self.max_epochs):
+    on_train_epoch_start()
+
+    for step in range(self.training_steps_per_epoch):
+
+        batch = algo.get_batch(ts, step)
+
+        on_train_batch_start()
+
+        ts, metrics = algo.training_step(step, ts, batch)
+
+        on_train_batch_end()
+
+    on_train_epoch_end()
+
+    # Evaluation "loop"
+    on_validation_epoch_start()
+    epoch_eval_metrics = self.eval_epoch(ts, epoch, algo)
+    on_validation_epoch_start()
+
+    eval_metrics.append(epoch_eval_metrics)
+
+return ts, eval_metrics
+
+

Caveats#

+
    +
  • Some lightning callbacks can be used with this trainer and work well, but not all of them.
  • +
  • You can either use regular pytorch-lightning callbacks, or use jax.vmap on the fit method, but not both.
  • +
  • If you want to use jax.vmap on the fit method, just remove the callbacks on the + Trainer for now.
  • +
+

TODOs / ideas#

+
    +
  • Add a checkpoint callback with orbax-checkpoint?
  • +
+ + + + + + + + + +
+ + + + + + + + + +
+ + +

+ fit + + +#

+
fit(
+    algo: JaxModule[Ts, _B, _MetricsT],
+    rng: PRNGKey,
+    train_state: Ts | None = None,
+    skip_initial_evaluation: bool = False,
+) -> tuple[Ts, _MetricsT]
+
+ +
+ +

Full training loop in pure jax (a lot faster than when using pytorch-lightning).

+

Unfolded version of rejax.PPO.train.

+

Training loop in pure jax (a lot faster than when using pytorch-lightning).

+ +
+ +
+ +
+ + +

+ training_step + + +#

+
training_step(
+    batch_idx: int,
+    ts: Ts,
+    algo: JaxModule[Ts, _B, _MetricsT],
+)
+
+ +
+ +

Training step in pure jax (joined data collection + training).

+

MUCH faster than using pytorch-lightning, but you lose the callbacks and such.

+ +
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/trainers/jax_trainer/index.html b/reference/project/trainers/jax_trainer/index.html new file mode 100644 index 00000000..f370757d --- /dev/null +++ b/reference/project/trainers/jax_trainer/index.html @@ -0,0 +1,2920 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Jax trainer - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Jax trainer

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + +
+ + + +

+ Ts + + + + module-attribute + + +#

+
Ts = TypeVar('Ts', bound=PyTreeNode, default=PyTreeNode)
+
+ +
+ +

Type Variable for the training state.

+
+ +
+ + +
+ + + +

+ JaxModule + + +#

+ + +
+

+ Bases: Protocol[Ts, _B, _MetricsT]

+ + +

A protocol for algorithms that can be trained by the JaxTrainer.

+

The JaxRLExample is an example that follows this structure and can be trained with a +JaxTrainer.

+ + + + + + + + + +
+ + + + + + + + + +
+ + +

+ init_train_state + + +#

+
init_train_state(rng: PRNGKey) -> Ts
+
+ +
+ +

Create the initial training state.

+ +
+ +
+ +
+ + +

+ get_batch + + +#

+
get_batch(ts: Ts, batch_idx: int) -> tuple[Ts, _B]
+
+ +
+ +

Produces a batch of data.

+ +
+ +
+ +
+ + +

+ training_step + + +#

+
training_step(
+    batch_idx: int, ts: Ts, batch: _B
+) -> tuple[Ts, PyTreeNode]
+
+ +
+ +

Update the training state using a "batch" of data.

+ +
+ +
+ +
+ + +

+ eval_callback + + +#

+
eval_callback(ts: Ts) -> _MetricsT
+
+ +
+ +

Perform evaluation and return metrics.
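To make the protocol concrete, here is a minimal sketch of a class that satisfies it. The class name, shapes and metric values are illustrative only; a real algorithm (such as the JaxRLExample) would typically return a typed metrics struct rather than a plain dict.

import jax
import jax.numpy as jnp
import flax.struct

class ToyAlgorithm(flax.struct.PyTreeNode):
    """A trivial JaxModule-compatible algorithm that 'trains' on constant data."""

    def init_train_state(self, rng: jax.Array) -> jax.Array:
        return jnp.zeros(())  # the training state is a single scalar counter here

    def get_batch(self, ts: jax.Array, batch_idx: int) -> tuple[jax.Array, jax.Array]:
        return ts, jnp.ones((32, 10))  # (possibly-updated state, batch of data)

    def training_step(self, batch_idx: int, ts: jax.Array, batch: jax.Array):
        metrics = {"loss": batch.mean()}
        return ts + 1, metrics

    def eval_callback(self, ts: jax.Array):
        return {"num_updates": ts}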

+ +
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ JaxTrainer + + +#

+ + +
+

+ Bases: PyTreeNode

+ + +

A simplified version of the lightning.Trainer with a fully jitted training loop.

+

Assumptions:#

+
    +
  • The algo object must match the JaxModule protocol (in other words, it should implement its + methods).
  • +
+

Training loop#

+

This is the training loop, which is fully jitted:

+
ts = algo.init_train_state(rng)
+
+setup("fit")
+on_fit_start()
+on_train_start()
+
+eval_metrics = []
+for epoch in range(self.max_epochs):
+    on_train_epoch_start()
+
+    for step in range(self.training_steps_per_epoch):
+
+        batch = algo.get_batch(ts, step)
+
+        on_train_batch_start()
+
+        ts, metrics = algo.training_step(step, ts, batch)
+
+        on_train_batch_end()
+
+    on_train_epoch_end()
+
+    # Evaluation "loop"
+    on_validation_epoch_start()
+    epoch_eval_metrics = self.eval_epoch(ts, epoch, algo)
+    on_validation_epoch_end()
+
+    eval_metrics.append(epoch_eval_metrics)
+
+return ts, eval_metrics
+
+

Caveats#

+
    +
  • Some lightning callbacks can be used with this trainer and work well, but not all of them.
  • +
  • You can either use regular pytorch-lightning callbacks, or use jax.vmap on the fit method, + but not both.
  • +
  • If you want to use jax.vmap on the fit method, just remove the callbacks on the + Trainer for now (see the sketch after this list).
  • +
+
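Here is the sketch mentioned above: training several seeds in parallel by applying jax.vmap to the fit method. It assumes that a trainer (a JaxTrainer with no callbacks) and an algo (a JaxModule) already exist.

import jax

rngs = jax.random.split(jax.random.PRNGKey(0), num=3)  # three training seeds
train_states, eval_metrics = jax.vmap(lambda rng: trainer.fit(algo, rng))(rngs)
# Every leaf of `train_states` and `eval_metrics` now has a leading axis of size 3.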

TODOs / ideas#

+
    +
  • Add a checkpoint callback with orbax-checkpoint?
  • +
+ + + + + + + + + +
+ + + + + + + + + +
+ + +

+ fit + + +#

+
fit(
+    algo: JaxModule[Ts, _B, _MetricsT],
+    rng: PRNGKey,
+    train_state: Ts | None = None,
+    skip_initial_evaluation: bool = False,
+) -> tuple[Ts, _MetricsT]
+
+ +
+ +

Full training loop in pure jax (a lot faster than when using pytorch-lightning).

+

Unfolded version of rejax.PPO.train.

+

Training loop in pure jax (a lot faster than when using pytorch-lightning).

+ +
+ +
+ +
+ + +

+ training_step + + +#

+
training_step(
+    batch_idx: int,
+    ts: Ts,
+    algo: JaxModule[Ts, _B, _MetricsT],
+)
+
+ +
+ +

Training step in pure jax (joined data collection + training).

+

MUCH faster than using pytorch-lightning, but you lose the callbacks and such.

+ +
+ +
+ + + +
+ +
+ +
+ + +
+ + +

+ hparams_to_dict + + +#

+
hparams_to_dict(algo: PyTreeNode) -> dict
+
+ +
+ +

Convert the learner struct to a serializable dict.

+ +
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/utils/env_vars/index.html b/reference/project/utils/env_vars/index.html new file mode 100644 index 00000000..3b03ef14 --- /dev/null +++ b/reference/project/utils/env_vars/index.html @@ -0,0 +1,2878 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Env vars - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Env vars

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + +
+ + + +

+ SLURM_JOB_ID + + + + module-attribute + + +#

+
SLURM_JOB_ID: int | None = (
+    int(environ["SLURM_JOB_ID"])
+    if "SLURM_JOB_ID" in environ
+    else None
+)
+
+ +
+ +

The value of the 'SLURM_JOB_ID' environment variable.

+

See https://slurm.schedmd.com/sbatch.html#OPT_SLURM_JOB_ID.

+
+ +
+ +
+ + + +

+ SLURM_TMPDIR + + + + module-attribute + + +#

+
SLURM_TMPDIR: Path | None = (
+    Path(environ["SLURM_TMPDIR"])
+    if "SLURM_TMPDIR" in environ
+    else (
+        tmp
+        if SLURM_JOB_ID is not None and exists()
+        else None
+    )
+)
+
+ +
+ +

The SLURM temporary directory, the fastest storage available.

+
    +
  • Extract your dataset to this directory at the start of your job.
  • +
  • Remember to move any files created here to $SCRATCH since everything gets deleted at the end of the job.
  • +
+

See https://docs.mila.quebec/Information.html#slurm-tmpdir for more information.
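A hedged sketch of the usual pattern: extract data to the fast local SLURM_TMPDIR at the start of the job, then copy anything worth keeping back to SCRATCH before the job ends. The archive and directory names below are hypothetical.

import shutil
from project.utils.env_vars import SCRATCH, SLURM_TMPDIR

if SLURM_TMPDIR is not None and SCRATCH is not None:
    data_dir = SLURM_TMPDIR / "data"
    # Extract the (hypothetical) dataset archive from $SCRATCH to the fast local disk.
    shutil.unpack_archive(SCRATCH / "datasets" / "my_dataset.tar.gz", extract_dir=data_dir)
    # ... training writes checkpoints under SLURM_TMPDIR / "checkpoints" ...
    # Move results to $SCRATCH since SLURM_TMPDIR is wiped at the end of the job.
    shutil.copytree(SLURM_TMPDIR / "checkpoints", SCRATCH / "checkpoints", dirs_exist_ok=True)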

+
+ +
+ +
+ + + +

+ SCRATCH + + + + module-attribute + + +#

+
SCRATCH = (
+    Path(environ["SCRATCH"])
+    if "SCRATCH" in environ
+    else None
+)
+
+ +
+ +

Network directory where temporary logs / checkpoints / custom datasets should be saved.

+

Note that this is temporary storage. Files that you wish to be saved long-term should be saved to the ARCHIVE directory.

+

See https://docs.mila.quebec/Information.html#scratch for more information.

+
+ +
+ +
+ + + +

+ ARCHIVE + + + + module-attribute + + +#

+
ARCHIVE = (
+    Path(environ["ARCHIVE"])
+    if "ARCHIVE" in environ
+    else None
+)
+
+ +
+ +

Network directory for long-term storage. Only accessible from the login or cpu-only compute +nodes.

+

See https://docs.mila.quebec/Information.html#archive for more information.

+
+ +
+ +
+ + + +

+ NETWORK_DIR + + + + module-attribute + + +#

+
NETWORK_DIR = (
+    Path(environ["NETWORK_DIR"])
+    if "NETWORK_DIR" in environ
+    else _network_dir if exists() else None
+)
+
+ +
+ +

The (read-only) network directory that contains datasets/weights/etc.

+

todo: adapt this for the DRAC clusters.

+

When running outside of the mila/DRAC clusters, this will be None, but can be mocked by setting the NETWORK_DIR environment variable.

+
+ +
+ +
+ + + +

+ REPO_ROOTDIR + + + + module-attribute + + +#

+
REPO_ROOTDIR = parent
+
+ +
+ +

The root directory of this repository on this machine.

+
+ +
+ +
+ + + +

+ DATA_DIR + + + + module-attribute + + +#

+
DATA_DIR = Path(
+    get(
+        "DATA_DIR",
+        SLURM_TMPDIR or SCRATCH or REPO_ROOTDIR / "data",
+    )
+)
+
+ +
+ +

Local directory where datasets should be extracted on this machine.

+
+ +
+ +
+ + + +

+ torchvision_dir + + + + module-attribute + + +#

+
torchvision_dir: Path | None = None
+
+ +
+ +

Network directory with torchvision datasets.

+
+ +
+ +
+ + + +

+ NUM_WORKERS + + + + module-attribute + + +#

+
NUM_WORKERS = int(
+    get(
+        "SLURM_CPUS_PER_TASK",
+        get(
+            "SLURM_CPUS_ON_NODE",
+            (
+                len(sched_getaffinity(0))
+                if hasattr(os, "sched_getaffinity")
+                else cpu_count()
+            ),
+        ),
+    )
+)
+
+ +
+ +

Default number of workers to be used by dataloaders, based on the number of CPUs and/or +tasks.
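For example, it can be passed directly to a DataLoader (the toy dataset below is only for illustration):

import torch
from torch.utils.data import DataLoader, TensorDataset
from project.utils.env_vars import NUM_WORKERS

dataset = TensorDataset(torch.zeros(100, 3), torch.zeros(100, dtype=torch.long))
loader = DataLoader(dataset, batch_size=32, num_workers=NUM_WORKERS)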

+
+ +
+ + + +
+ + +

+ get_constant + + +#

+
get_constant(*names: str)
+
+ +
+ +

Resolver for Hydra to get the value of a constant in this file.

+ +
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/utils/hydra_utils/index.html b/reference/project/utils/hydra_utils/index.html new file mode 100644 index 00000000..ab53485d --- /dev/null +++ b/reference/project/utils/hydra_utils/index.html @@ -0,0 +1,2688 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Hydra utils - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Hydra utils

+ +
+ + + + +
+ +

Utility functions related to working with Hydra.

+ + + + + + + + +
+ + + + + + + + + +
+ + +

+ get_attr + + +#

+
get_attr(obj: Any, *attributes: str)
+
+ +
+ +

Recursive version of getattr when the attribute is like 'a.b.c'.
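A small illustrative usage (the nested object below is a stand-in for any config or module):

import types
from project.utils.hydra_utils import get_attr

cfg = types.SimpleNamespace(optimizer=types.SimpleNamespace(lr=1e-3))
assert get_attr(cfg, "optimizer.lr") == 1e-3  # same as cfg.optimizer.lr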

+ +
+ +
+ +
+ + +

+ register_instance_attr_resolver + + +#

+
register_instance_attr_resolver(
+    instantiated_objects_cache: dict[str, Any]
+) -> None
+
+ +
+ +

Registers the instance_attr custom resolver with OmegaConf.

+ +
+ +
+ +
+ + +

+ resolve_dictconfig + + +#

+
resolve_dictconfig(dict_config: DictConfig) -> Config
+
+ +
+ +

Resolve all interpolations in the DictConfig.

+

Returns a Config object, which is a simple dataclass used to give +nicer type hints for the contents of an experiment config.

+ +
+ +
+ +
+ + +

+ instance_attr + + +#

+
instance_attr(
+    *attributes: str,
+    _instantiated_objects_cache: (
+        MutableMapping[str, Any] | None
+    ) = None
+)
+
+ +
+ +

Allows interpolations of the instantiated objects' attributes (rather than of the configs).

+
+

This is very hacky

+

This is quite hacky and very dependent on the code of Hydra / OmegaConf not changing too +much in the future. For this reason, consider pinning the versions of these libraries in +your project if you intend to use this feature.

+
+

This works during a call to hydra.utils.instantiate, by looking at the stack trace to find +the instantiated objects, which are in a variable in that function.

+

If there is a ${instance_attr:datamodule.num_classes} interpolation in a config, this will:

+
  1. instantiate the datamodule config
  2. store it at the key 'datamodule' in the instantiated objects cache dict (if passed).
     (This is useful since it makes it possible for us to later reuse this instantiated datamodule instead of re-instantiating it.)
  3. Retrieve the value of the attribute (getattr(datamodule, 'num_classes')) and return it.
+ +
+ +
+ +
+ + +

+ make_config_and_store + + +#

+
make_config_and_store(
+    target: Callable[..., Target],
+    *,
+    store: ZenStore,
+    **overrides
+)
+
+ +
+ +

Creates a config dataclass for the given target and stores it in the config store.

+

This uses hydra_zen.builds +to create the config dataclass and stores it at the name config_name, or target.__name__.
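A hedged usage sketch, assuming a hydra_zen.ZenStore instance; per the description above, the generated config is stored under the target's __name__ (here "Linear"):

import hydra_zen
import torch
from project.utils.hydra_utils import make_config_and_store

store = hydra_zen.ZenStore(name="example_store")
make_config_and_store(torch.nn.Linear, store=store)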

+ +
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/utils/index.html b/reference/project/utils/index.html new file mode 100644 index 00000000..a2d54169 --- /dev/null +++ b/reference/project/utils/index.html @@ -0,0 +1,2463 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Utils - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Utils

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/utils/remote_launcher_plugin/index.html b/reference/project/utils/remote_launcher_plugin/index.html new file mode 100644 index 00000000..6e9c1f34 --- /dev/null +++ b/reference/project/utils/remote_launcher_plugin/index.html @@ -0,0 +1,2770 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Remote launcher plugin - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Remote launcher plugin

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ PatchedSlurmQueueConf + + + + dataclass + + +#

+ + +
+

+ Bases: _AddedArgumentsConf, SlurmQueueConf

+ + +

Adds more SLURM parameters to the config for the SLURM submitit launcher of Hydra.

+ + + + + + + + + +
+ + + + + + + +
+ + + +

+ signal_delay_s + + + + class-attribute + instance-attribute + + +#

+
signal_delay_s: int = 120
+
+ +
+ +

USR1 signal delay before timeout.

+
+ +
+ +
+ + + +

+ max_num_timeout + + + + class-attribute + instance-attribute + + +#

+
max_num_timeout: int = 0
+
+ +
+ +

Maximum number of retries on job timeout.

+

Change this only after you have confirmed that your code can handle re-submission by properly resuming +from the latest stored checkpoint. Check the following for more info on slurm_max_num_timeout: +https://github.com/facebookincubator/submitit/blob/master/docs/checkpointing.md

+
+ +
+ +
+ + + +

+ additional_parameters + + + + class-attribute + instance-attribute + + +#

+
additional_parameters: dict[str, Any] = field(
+    default_factory=dict
+)
+
+ +
+ +

Useful to add parameters which are not currently available in the plugin.

+

E.g.: {"mail-user": "blublu@fb.com", "mail-type": "BEGIN"}

+
+ +
+ +
+ + + +

+ array_parallelism + + + + class-attribute + instance-attribute + + +#

+
array_parallelism: int = 256
+
+ +
+ +

Maximum number of jobs running in parallel.

+
+ +
+ +
+ + + +

+ setup + + + + class-attribute + instance-attribute + + +#

+
setup: list[str] | None = None
+
+ +
+ +

A list of commands to run in sbatch before running srun.

+
+ +
+ + + + + +
+ +
+ +
+ + +
+ + +

+ get_slurm_accounts + + +#

+
get_slurm_accounts(cluster: str) -> list[str]
+
+ +
+ +

Gets the SLURM accounts of the user using sacctmgr on the slurm cluster.

+ +
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/utils/remote_launcher_plugin_test/index.html b/reference/project/utils/remote_launcher_plugin_test/index.html new file mode 100644 index 00000000..082ac885 --- /dev/null +++ b/reference/project/utils/remote_launcher_plugin_test/index.html @@ -0,0 +1,2536 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Remote launcher plugin test - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Remote launcher plugin test

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + + +
+ + +

+ test_can_load_configs + + +#

+
test_can_load_configs(command_line_args: str)
+
+ +
+ +

Test that the cluster and resource configs can be loaded without errors.

+ +
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/utils/testutils/index.html b/reference/project/utils/testutils/index.html new file mode 100644 index 00000000..b393bfbc --- /dev/null +++ b/reference/project/utils/testutils/index.html @@ -0,0 +1,2976 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Testutils - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Testutils

+ +
+ + + + +
+ +

Utility functions useful for testing.

+ + + + + + + + +
+ + + + + + + +
+ + + +

+ default_marks_for_config_name + + + + module-attribute + + +#

+
default_marks_for_config_name: dict[
+    str, list[MarkDecorator]
+] = {
+    "inaturalist": [
+        slow,
+        skipif(
+            not NETWORK_DIR and exists(),
+            reason="Expects to be run on the Mila cluster for now",
+        ),
+    ],
+    "imagenet": [
+        slow,
+        skipif(
+            not NETWORK_DIR and exists(),
+            reason="Expects to be run on a cluster with the ImageNet dataset.",
+        ),
+    ],
+    "vision": [
+        skip(
+            reason="Base class, shouldn't be instantiated."
+        )
+    ],
+}
+
+ +
+ +

Dict with some default marks for some config names.

+
+ +
+ +
+ + + +

+ default_marks_for_config_combinations + + + + module-attribute + + +#

+
default_marks_for_config_combinations: dict[
+    tuple[str, ...], list[MarkDecorator]
+] = {
+    ("imagenet", "fcnet"): [
+        xfail(
+            reason="FcNet shouldn't be applied to the ImageNet datamodule. It can lead to nans in the parameters."
+        )
+    ],
+    ("imagenet", "jax_fcnet"): [
+        xfail(
+            reason="FcNet shouldn't be applied to the ImageNet datamodule. It can lead to nans in the parameters."
+        )
+    ],
+    ("imagenet", "jax_cnn"): [
+        xfail(
+            reason="todo: parameters contain nans when overfitting on one batch? Maybe we're using too many iterations?"
+        )
+    ],
+    None: {
+        (resnet_config, mnist_dataset_config): [
+            skip(
+                reason="ResNets don't work with MNIST datasets because the image resolution is too small."
+            )
+        ]
+        for (
+            resnet_config,
+            mnist_dataset_config,
+        ) in product(
+            get_all_configs_in_group_of_type(
+                "algorithm/network", ResNet
+            ),
+            get_all_configs_in_group_of_type(
+                "datamodule",
+                (MNISTDataModule, FashionMNISTDataModule),
+            ),
+        )
+    },
+}
+
+ +
+ +

Dict with some default marks to add to tests when some config combinations are present.

+

For example, ResNet networks can't be applied to the MNIST datasets.

+
+ +
+ + + +
+ + +

+ get_target_of_config + + +#

+
get_target_of_config(
+    config_group: str,
+    config_name: str,
+    _cs: ConfigStore | None = None,
+) -> Callable
+
+ +
+ +

Returns the class that is to be instantiated by the given config name.

+

In the case of inner dataclasses (e.g. Model.HParams), this returns the outer class (Model).

+ +
+ +
+ +
+ + +

+ get_all_configs_in_group_of_type + + +#

+
get_all_configs_in_group_of_type(
+    config_group: str,
+    config_target_type: type | tuple[type, ...],
+    include_subclasses: bool = True,
+    excluding: type | tuple[type, ...] = (),
+) -> list[str]
+
+ +
+ +

Returns the names of all the configs in the given config group that have this target or a +subclass of it.

+ +
+ +
+ +
+ + +

+ run_for_all_configs_of_type + + +#

+
run_for_all_configs_of_type(
+    config_group: str,
+    target_type: type,
+    excluding: type | tuple[type, ...] = (),
+)
+
+ +
+ +

Parametrizes a test to run with all the configs in the given group that have targets which +are subclasses of the given type.

+

For example:

+
@run_for_all_configs_of_type("algorithm", torch.nn.Module)
+def test_something_about_the_algorithm(algorithm: torch.nn.Module):
+    ''' This test will run with all the configs in the 'algorithm' group that create nn.Modules! '''
+
+

Concretely, this works by indirectly parametrizing the f"{config_group}_config" fixture. +To learn more about indirect parametrization in PyTest, take a look at +https://docs.pytest.org/en/stable/example/parametrize.html#indirect-parametrization

+ +
+ +
+ +
+ + +

+ parametrize_when_used + + +#

+
parametrize_when_used(
+    arg_name_or_fixture: str | Callable,
+    values: list,
+    indirect: bool | None = None,
+) -> MarkDecorator
+
+ +
+ +

Fixture that applies pytest.mark.parametrize only when the argument is used (directly or +indirectly).

+

When pytest.mark.parametrize is applied to a class, all test methods in that class need to +use the parametrized argument, otherwise an error is raised. This function exists to work around +this and allows writing test methods that don't use the parametrized argument.

+

For example, this works, but would not be possible with pytest.mark.parametrize:

+
import pytest
+
+@parametrize_when_used("value", [1, 2, 3])
+class TestFoo:
+    def test_foo(self, value):
+        ...
+
+    def test_bar(self, value):
+        ...
+
+    def test_something_else(self):  # Does not use `value`: fine here, but an error with pytest.mark.parametrize.
+        pass
+
+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
+ arg_name_or_fixture + + str | Callable + +
+

The name of the argument to parametrize, or a fixture to parametrize indirectly.

+
+
+ required +
+ values + + list + +
+

The values to be used to parametrize the test.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ MarkDecorator + +
+

A pytest.MarkDecorator that parametrizes the test with the given values only when the argument is used (directly or indirectly) by the test.

+
+
+ +
+ +
+ +
+ + +

+ run_for_all_configs_in_group + + +#

+
run_for_all_configs_in_group(
+    group_name: str,
+    config_name_to_marks: (
+        Mapping[str, MarkDecorator | list[MarkDecorator]]
+        | None
+    ) = None,
+)
+
+ +
+ +

Apply this marker to a test to make it run with all configs in a given group.

+

This assumes that a "group_name_config" fixture is defined, for example, algorithm_config, +datamodule_config, network_config. This then does an indirect parametrization of that fixture, so that it +receives the config name as a parameter and returns it.

+

The wrapped test will use all configs from that group if they are used either as an input +argument to the test function or as an input argument to a fixture function.
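A hedged usage sketch, assuming the template's datamodule_config fixture is available (the marks and config names below are illustrative):

import pytest
from project.utils.testutils import run_for_all_configs_in_group

@run_for_all_configs_in_group(
    "datamodule", config_name_to_marks={"imagenet": pytest.mark.slow}
)
def test_something_with_every_datamodule(datamodule_config: str):
    assert isinstance(datamodule_config, str)  # receives each config name in the group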

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
+ group_name + + str + +
+

The name of the config group whose configs will be used to parametrize the test (for example "datamodule", "algorithm" or "algorithm/network").

+
+
+ required +
+ config_name_to_marks + + Mapping[str, MarkDecorator | list[MarkDecorator]] | None + +
+

Dictionary from config names to pytest marks (e.g. pytest.mark.xfail, pytest.mark.skip) to use for that particular config.

+
+
+ None +
+ +
+ +
+ +
+ + +

+ total_vram_gb + + +#

+
total_vram_gb() -> float
+
+ +
+ +

Returns the total VRAM in GB.

+ +
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/utils/typing_utils/index.html b/reference/project/utils/typing_utils/index.html new file mode 100644 index 00000000..ab728789 --- /dev/null +++ b/reference/project/utils/typing_utils/index.html @@ -0,0 +1,2579 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Typing utils - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Typing utils

+ +
+ + + + +
+ +

Utilities to help annotate the types of values in the project.

+ + + + + + + + +
+ + + + + + + +
+ + + +

+ HydraConfigFor + + + + module-attribute + + +#

+
HydraConfigFor = Builds[type[T]]
+
+ +
+ +

Type annotation to say "a hydra config that returns an object of type T when instantiated".
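For example (mirroring how the ImageClassifier annotates its network config):

import hydra_zen
import torch
from project.utils.typing_utils import HydraConfigFor

def build_network(network_config: HydraConfigFor[torch.nn.Module]) -> torch.nn.Module:
    # Instantiating the config is expected to produce an object of the annotated type.
    return hydra_zen.instantiate(network_config)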

+
+ +
+ + +
+ + + +

+ DataModule + + +#

+ + +
+

+ Bases: Protocol[BatchType]

+ + +

Protocol that shows the minimal attributes / methods of the LightningDataModule class.

+

This is used to type hint the batches that are yielded by the DataLoaders.

+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ + +
+ + +

+ is_sequence_of + + +#

+
is_sequence_of(
+    object: Any, item_type: type[V] | tuple[type[V], ...]
+) -> TypeGuard[Sequence[V]]
+
+ +
+ +

Used to check (and tell the type checker) that object is a sequence of items of this +type.
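A small illustrative usage:

from project.utils.typing_utils import is_sequence_of

stuff: list[object] = [1, 2, 3]
if is_sequence_of(stuff, int):
    # The type checker now treats `stuff` as a Sequence[int].
    print(sum(stuff))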

+ +
+ +
+ +
+ + +

+ is_mapping_of + + +#

+
is_mapping_of(
+    object: Any, key_type: type[K], value_type: type[V]
+) -> TypeGuard[Mapping[K, V]]
+
+ +
+ +

Used to check (and tell the type checker) that object is a mapping with keys and values of +the given types.

+ +
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/utils/typing_utils/jax_typing_utils/index.html b/reference/project/utils/typing_utils/jax_typing_utils/index.html new file mode 100644 index 00000000..3de8139c --- /dev/null +++ b/reference/project/utils/typing_utils/jax_typing_utils/index.html @@ -0,0 +1,2591 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Jax typing utils - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Jax typing utils

+ +
+ + + + +
+ +

Small typing helpers for Jax.

+

This makes jax.jit preserve the signature of the wrapped callable.
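As a rough sketch of the idea (the typed_jit name is illustrative, not necessarily the helper defined in this module), a ParamSpec can be used so the jitted function keeps the wrapped callable's parameter types:

from typing import Callable, ParamSpec, TypeVar
import jax

P = ParamSpec("P")
Out = TypeVar("Out")

def typed_jit(fn: Callable[P, Out], **jit_kwargs) -> Callable[P, Out]:
    """Wraps jax.jit so the returned callable keeps `fn`'s signature for type checkers."""
    return jax.jit(fn, **jit_kwargs)  # type: ignore[return-value]

@typed_jit
def scale(x: jax.Array, factor: float) -> jax.Array:
    return x * factor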

+ + + + + + + + +
+ + + + + + + + + +
+ + +

+ field + + +#

+
+
field(
+    *,
+    default: _T,
+    init: bool = True,
+    repr: bool = True,
+    hash: bool | None = None,
+    compare: bool = True,
+    metadata: Mapping[Any, Any] | None = None,
+    kw_only: bool = ...,
+    pytree_node: bool = True
+) -> _T
+
field(
+    *,
+    default_factory: Callable[[], _T],
+    init: bool = True,
+    repr: bool = True,
+    hash: bool | None = None,
+    compare: bool = True,
+    metadata: Mapping[Any, Any] | None = None,
+    kw_only: bool = ...,
+    pytree_node: bool = True
+) -> _T
+
field(
+    *,
+    init: bool = True,
+    repr: bool = True,
+    hash: bool | None = None,
+    compare: bool = True,
+    metadata: Mapping[Any, Any] | None = None,
+    kw_only: bool = ...,
+    pytree_node: bool = True
+) -> Any
+
+
field(
+    *,
+    default=MISSING,
+    default_factory=MISSING,
+    init=True,
+    repr=True,
+    hash=None,
+    compare=True,
+    metadata: Mapping[Any, Any] | None = None,
+    kw_only=MISSING,
+    pytree_node: bool | None = None
+)
+
+ +
+ +

Small Typing fix for flax.struct.field.

+
    +
  • Add type annotations so it doesn't drop the signature of the dataclasses.field function.
  • +
  • Make pytree_node default to False for ints and bools, and True for + everything else.
  • +
+ +
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/utils/typing_utils/protocols/index.html b/reference/project/utils/typing_utils/protocols/index.html new file mode 100644 index 00000000..a9a8a81c --- /dev/null +++ b/reference/project/utils/typing_utils/protocols/index.html @@ -0,0 +1,2670 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Protocols - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Protocols

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + +
+ + + +

+ Module + + +#

+ + +
+

+ Bases: Protocol[P, OutT]

+ + +

Small protocol that can be used to annotate the input/output types of torch.nn.Modules.
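A hedged usage sketch (assuming the protocol is parametrized as Module[[input types], output type]):

import torch
from project.utils.typing_utils.protocols import Module

def apply_twice(network: Module[[torch.Tensor], torch.Tensor], x: torch.Tensor) -> torch.Tensor:
    # `network` is annotated as "something callable that maps a Tensor to a Tensor".
    return network(network(x))

output = apply_twice(torch.nn.Linear(4, 4), torch.zeros(2, 4))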

+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ DataModule + + +#

+ + +
+

+ Bases: Protocol[BatchType]

+ + +

Protocol that shows the minimal attributes / methods of the LightningDataModule class.

+

This is used to type hint the batches that are yielded by the DataLoaders.

+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ ClassificationDataModule + + +#

+ + +
+

+ Bases: DataModule[BatchType], Protocol

+ + +

Protocol that matches datamodules with a 'num_classes' int attribute.

+ + + + + + + + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/reference/project/utils/utils/index.html b/reference/project/utils/utils/index.html new file mode 100644 index 00000000..80a4fb47 --- /dev/null +++ b/reference/project/utils/utils/index.html @@ -0,0 +1,2608 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Utils - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Utils

+ +
+ + + + +
+ + + + + + + + +
+ + + + + + + + + +
+ + +

+ print_config + + +#

+
print_config(
+    config: DictConfig,
+    print_order: Sequence[str] = (
+        "algorithm",
+        "datamodule",
+        "trainer",
+    ),
+    resolve: bool = True,
+) -> None
+
+ +
+ +

Prints content of DictConfig using Rich library and its tree structure.

+

TAKEN FROM https://github.com/ashleve/lightning-hydra-template/blob/6a92395ed6afd573fa44dd3a054a603acbdcac06/src/utils/__init__.py#L56
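A minimal usage sketch (the config contents below are illustrative):

from omegaconf import OmegaConf
from project.utils.utils import print_config

config = OmegaConf.create(
    {"algorithm": {"lr": 0.002}, "datamodule": {"batch_size": 64}, "trainer": {"max_epochs": 10}}
)
print_config(config, print_order=("algorithm", "datamodule", "trainer"), resolve=True)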

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
+ config + + DictConfig + +
+

Configuration composed by Hydra.

+
+
+ required +
+ print_order + + Sequence[str] + +
+

Determines in what order config components are printed.

+
+
+ ('algorithm', 'datamodule', 'trainer') +
+ resolve + + bool + +
+

Whether to resolve reference fields of DictConfig.

+
+
+ True +
+ +
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/resources/index.html b/resources/index.html new file mode 100644 index 00000000..eb8e84d9 --- /dev/null +++ b/resources/index.html @@ -0,0 +1,2538 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Learning Resources - Research Project Template (preview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + +
+
+ + + + +

Related projects and resources#

+

Hydra docs#

+

Other project templates#

+

There are other project templates out there that often have better documentation. +If you need an introduction to Hydra, Lightning, or good software development practices, these might have better guides and documentation for you.

+

Here are some we'd recommend:

+

lightning-hydra-template#

+ +

For everything that has to do with Hydra and PyTorch-Lightning, their documentation also applies directly to this template. In order to avoid copying their documentation, we recommend you take a look at their nice readme.

+

yet-another-lightning-hydra-template#

+
- Excellent template, based on the lightning-hydra-template. Great documentation, which is referenced extensively in this project.
+  - Has a **great** Readme with lots of information
+  - Is really well organized
+  - Doesn't support Jax
+  - Doesn't have a devcontainer
+- Great blog: https://hackernoon.com/yet-another-lightning-hydra-template-for-ml-experiments
+
+

cookiecutter-data-science#

+
- Awesome library for data science.
+- Related projects: https://github.com/drivendataorg/cookiecutter-data-science/blob/master/docs/docs/related.md#links-to-related-projects-and-references
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/search/search_index.json b/search/search_index.json new file mode 100644 index 00000000..cab3f8f7 --- /dev/null +++ b/search/search_index.json @@ -0,0 +1 @@ +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Home","text":""},{"location":"#research-project-template","title":"Research Project Template","text":"

Work-in-Progress

Please note: This is a Work-in-Progress. The goal is to make a first release by the end of fall 2024.

This is a research project template. It is meant to be a starting point for ML researchers at Mila.

For more context, see this introduction to the project..

  • Set up in 5 minutes

    Get started quickly with a single installation script and get up and running in minutes

    Getting started

  • Well-tested, robust codebase

    Focus on your research! Let tests take care of detecting bugs and broken configs!

    Check out the included tests

  • Support for both PyTorch and Jax

    You can use both PyTorch and Jax for your algorithms! (Lightning handles the rest.)

    Check out the Jax example

  • Ready-to-use examples

    Includes examples for Supervised learning(1) and NLP \ud83e\udd17, with unsupervised learning and RL coming soon.

    1. The source code for the example is available here

    Check out the examples here

"},{"location":"#starting-a-new-project","title":"Starting a new project","text":"

To create a new project using this template, Click Here or on the green \"Use this template\" button on the template's GitHub repository.

"},{"location":"#setting-up-your-environment","title":"Setting up your environment","text":"

Here are two recommended ways to setup your development environment:

  • Using the uv package manager
  • Using a development container (recommended if you are able to install Docker on your machine)
Locally (Linux / Mac)Locally (Windows)On a SLURM cluster
  1. Clone your new repo and navigate into it

    git clone https://www.github.com/your-username/your-repo-name\ncd your-repo-name\n
  2. Install the package manager

    # Install uv\ncurl -LsSf https://astral.sh/uv/install.sh | sh\nsource $HOME/.cargo/env\n
  3. Install dependencies

    uv sync  # Creates a virtual environment and installs dependencies in it.\n
  1. Install WSL following this guide
  2. Follow the installation instructions for Linux
  1. Clone your new repo and navigate into it

    git clone https://www.github.com/your-username/your-repo-name\ncd your-repo-name\n
  2. (Mila cluster) - Launch the setup script

    If you're on the mila cluster, you can run the setup script on a compute node, just to be nice:

    srun --pty --gres=gpu:1 --cpus-per-task=4 --mem=16G --time=00:10:00 scripts/mila_setup.sh\n
"},{"location":"#usage","title":"Usage","text":"

To see all available options:

uv run python project/main.py --help\n

For a detailed list of examples, see the examples page.

"},{"location":"#developing-inside-a-container-advanced","title":"Developing inside a container (advanced)","text":"

This repo provides a Devcontainer configuration for Visual Studio Code to use a Docker container as a pre-configured development environment. This avoids struggles setting up a development environment and makes them reproducible and consistent.

If that sounds useful to you, we recommend you first make yourself familiar with the container tutorials if you want to use them. The devcontainer.json file assumes that you have a GPU locally by default. If not, you can simply comment out the \"--gpus\" flag in the .devcontainer/devcontainer.json file.

  1. Setup Docker on your local machine

    On an Linux machine where you have root access, you can install Docker using the following commands:

    curl -fsSL https://get.docker.com -o get-docker.sh\nsudo sh get-docker.sh\n

    On Windows or Mac, follow these installation instructions

  2. (optional) Install the nvidia-container-toolkit to use your local machine's GPU(s).

  3. Install the Dev Containers extension for Visual Studio Code.

  4. When opening repository in Visual Studio Code, you should be prompted to reopen the repository in a container:

    Alternatively, you can open the command palette (Ctrl+Shift+P) and select Dev Containers: Rebuild and Reopen in Container.

"},{"location":"SUMMARY/","title":"SUMMARY","text":"
  • Home
  • Intro
  • Features \ud83d\udd25
    • Magic Config Schemas
    • Jax and Torch support with Lightning \u26a1
    • Launching Jobs on Remote Clusters
    • Thorough automated testing on SLURM clusters
    • features/*.md
  • Examples \ud83e\uddea
    • Image Classification (\u26a1)
    • Image Classification (jax+\u26a1)
    • Text Classification (\ud83e\udd17+\u26a1)
    • Fine-tuning an LLM (\ud83e\udd17+\u26a1)
    • Reinforcement Learning (jax)
    • Running sweeps
    • Profiling your code\ud83d\udcce
    • examples/*.md
  • Reference \ud83e\udd13
    • reference/*
  • Learning Resources
  • Getting Help
  • Contributing
"},{"location":"contributing/","title":"Contributing","text":""},{"location":"contributing/#contributing","title":"Contributing","text":"

TODOs:

  • [ ] Describe how to contribute to the project.
"},{"location":"help/","title":"Getting Help","text":""},{"location":"help/#help-and-support","title":"Help and Support","text":""},{"location":"help/#faq","title":"FAQ","text":""},{"location":"help/#how-to-get-help","title":"How to get help","text":"
  • Make an Issue on GitHub
  • Reach out via Slack (if you're a researcher at Mila)
  • Reach out via email
"},{"location":"intro/","title":"Intro","text":""},{"location":"intro/#why-use-this-template","title":"Why use this template?","text":""},{"location":"intro/#why-should-you-use-a-template-in-the-first-place","title":"Why should you use a template in the first place?","text":"

For many good reasons, which are very well described here in a similar project! \ud83d\ude0a

Other good reads:

  • https://cookiecutter-data-science.drivendata.org/why/
  • https://cookiecutter-data-science.drivendata.org/opinions/
  • https://12factor.net/
  • https://github.com/ashleve/lightning-hydra-template/tree/main?tab=readme-ov-file#main-ideas
"},{"location":"intro/#why-use-this-template_1","title":"Why use this template?","text":"
  • Cool, unique features that can only be found here (for now)!
"},{"location":"intro/#project-layout","title":"Project layout","text":"
pyproject.toml   # Project metadata and dependencies\nproject/\n    main.py      # main entry-point\n    algorithms/  # learning algorithms\n    datamodules/ # datasets, processing and loading\n    networks/    # Neural networks used by algorithms\n    configs/     # Hydra configuration files\ndocs/            # documentation\nconftest.py      # Test fixtures and utilities\n
"},{"location":"intro/#libraries-used","title":"Libraries used","text":"

This project makes use of the following libraries:

  • Hydra is used to configure the project. It allows you to define configuration files and override them from the command line.
  • PyTorch Lightning is used to as the training framework. It provides a high-level interface to organize ML research code.
    • \ud83d\udd25 Please note: You can also use Jax with this repo, as described in the Jax example \ud83d\udd25
  • Weights & Biases is used to log metrics and visualize results.
  • pytest is used for testing.
"},{"location":"resources/","title":"Learning Resources","text":""},{"location":"resources/#related-projects-and-resources","title":"Related projects and resources","text":""},{"location":"resources/#hydra-docs","title":"Hydra docs","text":""},{"location":"resources/#other-project-templates","title":"Other project templates","text":"

There are other project templates out there, that often have better documentation. If you need an introduction to Hydra, or Lightning, or good software development practices, these might have better guides and documentation for you.

Here are some we'd recommend:

"},{"location":"resources/#lightning-hydra-template","title":"lightning-hydra-template","text":"
  • How it works: https://github.com/gorodnitskiy/yet-another-lightning-hydra-template/tree/main?tab=readme-ov-file#workflow---how-it-works

For everything that has to do with Hydra and PyTorch-Lightning, their documentation also applies directly to this template. In order to avoid copying their documentation, we recommend you take a look at their nice readme.

"},{"location":"resources/#yet-another-lightning-hydra-template","title":"yet-another-lightning-hydra-template","text":"
- Excellent template.  based on the lightning-hydra-template. Great documentation, which is referenced extensively in this project.\n- - Has a **great** Readme with lots of information\n- - Is really well organized\n- - doesn't support Jax\n- - doesn't have a devcontainer\n- Great blog: https://hackernoon.com/yet-another-lightning-hydra-template-for-ml-experiments\n
"},{"location":"resources/#cookiecutter-data-science","title":"cookiecutter-data-science","text":"
- Awesome library for data science.\n- Related projects: https://github.com/drivendataorg/cookiecutter-data-science/blob/master/docs/docs/related.md#links-to-related-projects-and-references\n
"},{"location":"examples/","title":"Examples \ud83e\uddea","text":""},{"location":"examples/#examples","title":"Examples","text":"

This template includes examples that use either Jax, PyTorch, or both!

Example link Research Area Reference link Frameworks Image Classification Supervised Learning (image classification) ImageClassifier Torch + \u26a1 Image Classification (Jax) Supervised Learning (image classification) JaxImageClassifier Torch + Jax + \u26a1 Text Classification NLP (text classification) TextClassifier Torch + \ud83e\udd17 + \u26a1 Reinforcement Learning (Jax) RL JaxRLExample Jax LLM Fine-tuning NLP (Causal language modeling) LLMFineTuningExample Torch + \ud83e\udd17 + \u26a1"},{"location":"examples/image_classification/","title":"Image Classification (\u26a1)","text":""},{"location":"examples/image_classification/#supervised-learning-pytorch","title":"Supervised Learning (PyTorch)","text":""},{"location":"examples/image_classification/#imageclassifier","title":"ImageClassifier","text":"

The ImageClassifier is a simple LightningModule for image classification. It accepts a vision datamodule as input.

Click to show the code of the ImageClassifier class.
class ImageClassifier(LightningModule):\n    \"\"\"Example learning algorithm for image classification.\"\"\"\n\n    def __init__(\n        self,\n        datamodule: ImageClassificationDataModule,\n        network: HydraConfigFor[torch.nn.Module],\n        optimizer: HydraConfigFor[functools.partial[Optimizer]],\n        init_seed: int = 42,\n    ):\n        \"\"\"Create a new instance of the algorithm.\n\n        Parameters:\n            datamodule: Object used to load train/val/test data.\n                See the lightning docs for [LightningDataModule][lightning.pytorch.core.datamodule.LightningDataModule]\n                for more info.\n            network:\n                The config of the network to instantiate and train.\n            optimizer: The config for the Optimizer. Instantiating this will return a function \\\n                (a [functools.partial][]) that will create the Optimizer given the hyper-parameters.\n            init_seed: The seed to use when initializing the weights of the network.\n        \"\"\"\n        super().__init__()\n        self.datamodule = datamodule\n        self.network_config = network\n        self.optimizer_config = optimizer\n        self.init_seed = init_seed\n\n        # Save hyper-parameters.\n        self.save_hyperparameters(ignore=[\"datamodule\"])\n        # Used by Pytorch-Lightning to compute the input/output shapes of the network.\n\n        self.network: torch.nn.Module | None = None\n\n    def configure_model(self):\n        # Save this for PyTorch-Lightning to infer the input/output shapes of the network.\n        self.example_input_array = torch.zeros((self.datamodule.batch_size, *self.datamodule.dims))\n        with torch.random.fork_rng():\n            # deterministic weight initialization\n            torch.manual_seed(self.init_seed)\n            self.network = hydra_zen.instantiate(self.network_config)\n            if any(torch.nn.parameter.is_lazy(p) for p in self.network.parameters()):\n                # Do a forward pass to initialize any lazy weights. 
This is necessary for\n                # distributed training and to infer shapes.\n                _ = self.network(self.example_input_array)\n\n    def forward(self, input: Tensor) -> Tensor:\n        \"\"\"Forward pass of the network.\"\"\"\n        assert self.network is not None\n        logits = self.network(input)\n        return logits\n\n    def training_step(self, batch: tuple[Tensor, Tensor], batch_index: int):\n        return self.shared_step(batch, batch_index=batch_index, phase=\"train\")\n\n    def validation_step(self, batch: tuple[Tensor, Tensor], batch_index: int):\n        return self.shared_step(batch, batch_index=batch_index, phase=\"val\")\n\n    def test_step(self, batch: tuple[Tensor, Tensor], batch_index: int):\n        return self.shared_step(batch, batch_index=batch_index, phase=\"test\")\n\n    def shared_step(\n        self,\n        batch: tuple[Tensor, Tensor],\n        batch_index: int,\n        phase: Literal[\"train\", \"val\", \"test\"],\n    ):\n        x, y = batch\n        logits: torch.Tensor = self(x)\n        loss = F.cross_entropy(logits, y, reduction=\"mean\")\n        self.log(f\"{phase}/loss\", loss.detach().mean())\n        acc = logits.detach().argmax(-1).eq(y).float().mean()\n        self.log(f\"{phase}/accuracy\", acc)\n        return {\"loss\": loss, \"logits\": logits, \"y\": y}\n\n    def configure_optimizers(self):\n        \"\"\"Creates the optimizers.\n\n        See [`lightning.pytorch.core.LightningModule.configure_optimizers`][] for more information.\n        \"\"\"\n        # Instantiate the optimizer config into a functools.partial object.\n        optimizer_partial = hydra_zen.instantiate(self.optimizer_config)\n        # Call the functools.partial object, passing the parameters as an argument.\n        optimizer = optimizer_partial(self.parameters())\n        # This then returns the optimizer.\n        return optimizer\n\n    def configure_callbacks(self) -> Sequence[Callback] | Callback:\n        \"\"\"Creates callbacks to be used by default during training.\"\"\"\n        return [\n            ClassificationMetricsCallback.attach_to(self, num_classes=self.datamodule.num_classes)\n        ]\n
"},{"location":"examples/image_classification/#running-the-example","title":"Running the example","text":"

Here is a configuration file that you can use to launch a simple experiment:

Click to show the yaml config file
# @package _global_\n\n# This is an \"experiment\" config, that groups together other configs into a ready-to-run example.\n\n# To execute this experiment, use:\n# python project/main.py experiment=example\n\ndefaults:\n  - override /algorithm: image_classifier\n  - override /algorithm/network: resnet18\n  - override /datamodule: cifar10\n  - override /trainer: default\n  - override /trainer/logger: tensorboard\n  - override /trainer/callbacks: default\n\n# The parameters below will be merged with parameters from default configurations set above.\n# This allows you to overwrite only specified parameters\n\n# The name of the e\nname: example\n\nseed: ${oc.env:SLURM_PROCID,42}\n\nalgorithm:\n  optimizer:\n    lr: 0.002\n\ndatamodule:\n  batch_size: 64\n\ntrainer:\n  min_epochs: 1\n  max_epochs: 10\n  gradient_clip_val: 0.5\n

You can use it like so:

python project/main.py experiment=example\n
"},{"location":"examples/jax_image_classification/","title":"Image Classification (jax+\u26a1)","text":""},{"location":"examples/jax_image_classification/#jax-pytorch-lightning","title":"Jax + PyTorch-Lightning \u26a1","text":""},{"location":"examples/jax_image_classification/#a-lightningmodule-that-trains-a-jax-network","title":"A LightningModule that trains a Jax network","text":"

The JaxImageClassifier algorithm uses a network which is a flax.linen.Module. The network is wrapped with torch_jax_interop.JaxFunction, so that it can accept torch tensors as inputs, produces torch tensors as outputs, and the parameters are saved as as torch.nn.Parameters (which use the same underlying memory as the jax arrays). In this example, the loss function and optimizers are in PyTorch, while the network forward and backward passes are written in Jax.

The loss that is returned in the training step is used by Lightning in the usual way. The backward pass uses Jax to calculate the gradients, and the weights are updated by a PyTorch optimizer.

Info

You could also very well do both the forward and backward passes in Jax! To do this, use the 'manual optimization' mode of PyTorch-Lightning and perform the parameter updates yourself. For the rest of Lightning to work, just make sure to store the parameters as torch.nn.Parameters. An example of how to do this will be added shortly.

What about end-to-end training in Jax?

See the Jax RL Example!

"},{"location":"examples/jax_image_classification/#jax-network","title":"Jax Network","text":"
class JaxCNN(flax.linen.Module):\n    \"\"\"A simple CNN model.\n\n    Taken from https://flax.readthedocs.io/en/latest/quick_start.html#define-network\n    \"\"\"\n\n    num_classes: int = 10\n\n    @flax.linen.compact\n    def __call__(self, x: jax.Array):\n        x = to_channels_last(x)\n        x = flax.linen.Conv(features=32, kernel_size=(3, 3))(x)\n        x = flax.linen.relu(x)\n        x = flax.linen.avg_pool(x, window_shape=(2, 2), strides=(2, 2))\n        x = flax.linen.Conv(features=64, kernel_size=(3, 3))(x)\n        x = flax.linen.relu(x)\n        x = flax.linen.avg_pool(x, window_shape=(2, 2), strides=(2, 2))\n\n        x = flatten(x)\n        x = flax.linen.Dense(features=256)(x)\n        x = flax.linen.relu(x)\n        x = flax.linen.Dense(features=self.num_classes)(x)\n        return x\n
"},{"location":"examples/jax_image_classification/#jax-algorithm","title":"Jax Algorithm","text":"
class JaxImageClassifier(LightningModule):\n    \"\"\"Example of a learning algorithm (`LightningModule`) that uses Jax.\n\n    In this case, the network is a flax.linen.Module, and its forward and backward passes are\n    written in Jax, and the loss function is in pytorch.\n    \"\"\"\n\n    def __init__(\n        self,\n        datamodule: ImageClassificationDataModule,\n        network: HydraConfigFor[flax.linen.Module],\n        optimizer: HydraConfigFor[functools.partial[Optimizer]],\n        init_seed: int = 123,\n        debug: bool = True,\n    ):\n        super().__init__()\n        self.datamodule = datamodule\n        self.network_config = network\n        self.optimizer_config = optimizer\n        self.init_seed = init_seed\n        self.debug = debug\n\n        # Create the jax network (safe to do even on CPU here).\n        self.jax_network: flax.linen.Module = hydra_zen.instantiate(self.network_config)\n        # We'll instantiate the parameters and the torch wrapper around the jax network in\n        # `configure_model` so the weights are directly on the GPU.\n        self.network: torch.nn.Module | None = None\n        self.save_hyperparameters(ignore=[\"datamodule\"])\n\n    def configure_model(self):\n        example_input = torch.zeros(\n            (self.datamodule.batch_size, *self.datamodule.dims),\n        )\n        # Save this for PyTorch-Lightning to infer the input/output shapes of the network.\n        self.example_input_array = example_input\n\n        # Initialize the jax parameters with a forward pass.\n        jax_params = self.jax_network.init(\n            jax.random.key(self.init_seed), torch_to_jax(example_input)\n        )\n\n        jax_network_forward = self.jax_network.apply\n        if not self.debug:\n            jax_network_forward = jax.jit(jax_network_forward)\n\n        # Wrap the jax network into a nn.Module:\n        self.network = WrappedJaxFunction(\n            jax_function=jax_network_forward,\n            jax_params=jax_params,\n            # Need to call .clone() when doing distributed training, otherwise we get a RuntimeError:\n            # Invalid device pointer when trying to share the CUDA tensors that come from jax.\n            clone_params=True,\n            has_aux=False,\n        )\n\n    def forward(self, input: torch.Tensor) -> torch.Tensor:\n        assert self.network is not None\n        logits = self.network(input)\n        return logits\n\n    def training_step(self, batch: tuple[torch.Tensor, torch.Tensor], batch_index: int):\n        return self.shared_step(batch, batch_index=batch_index, phase=\"train\")\n\n    def validation_step(self, batch: tuple[torch.Tensor, torch.Tensor], batch_index: int):\n        return self.shared_step(batch, batch_index=batch_index, phase=\"val\")\n\n    def test_step(self, batch: tuple[torch.Tensor, torch.Tensor], batch_index: int):\n        return self.shared_step(batch, batch_index=batch_index, phase=\"test\")\n\n    def shared_step(\n        self,\n        batch: tuple[torch.Tensor, torch.Tensor],\n        batch_index: int,\n        phase: Literal[\"train\", \"val\", \"test\"],\n    ):\n        # This is the same thing as the `ImageClassifier.shared_step`!\n        x, y = batch\n        assert not x.requires_grad\n        assert self.network is not None\n        logits = self.network(x)\n        assert isinstance(logits, torch.Tensor)\n        # In this example we use a jax \"encoder\" network and a PyTorch loss function, but we could\n        # also just as easily have done the 
whole forward and backward pass in jax if we wanted to.\n        loss = F.cross_entropy(logits, y, reduction=\"mean\")\n        acc = logits.argmax(-1).eq(y).float().mean()\n        self.log(f\"{phase}/loss\", loss, prog_bar=True, sync_dist=True)\n        self.log(f\"{phase}/acc\", acc, prog_bar=True, sync_dist=True)\n        return {\"loss\": loss, \"logits\": logits, \"y\": y}\n\n    def configure_optimizers(self):\n        \"\"\"Creates the optimizers.\n\n        See [`lightning.pytorch.core.LightningModule.configure_optimizers`][] for more information.\n        \"\"\"\n        # Instantiate the optimizer config into a functools.partial object.\n        optimizer_partial = hydra_zen.instantiate(self.optimizer_config)\n        # Call the functools.partial object, passing the parameters as an argument.\n        optimizer = optimizer_partial(self.parameters())\n        # This then returns the optimizer.\n        return optimizer\n\n    def configure_callbacks(self) -> list[Callback]:\n        assert isinstance(self.datamodule, ImageClassificationDataModule)\n        return [\n            MeasureSamplesPerSecondCallback(),\n            ClassificationMetricsCallback.attach_to(self, num_classes=self.datamodule.num_classes),\n        ]\n
"},{"location":"examples/jax_image_classification/#configs","title":"Configs","text":""},{"location":"examples/jax_image_classification/#lightningmodule-config","title":"LightningModule config","text":"
# Config for the JaxImageClassifier algorithm\ndefaults:\n  - network: jax_cnn\n  - optimizer: SGD\n_target_: project.algorithms.jax_image_classifier.JaxImageClassifier\n# NOTE: Why _partial_ here? Because the config doesn't create the algo directly.\n# The datamodule is instantiated first and then passed to the algorithm.\n_partial_: true\n_recursive_: false\n\noptimizer:\n  lr: 0.001\n\ninit_seed: 123\ndebug: False\n
"},{"location":"examples/jax_image_classification/#running-the-example","title":"Running the example","text":"
$ python project/main.py algorithm=jax_image_classifier network=jax_cnn datamodule=cifar10\n
"},{"location":"examples/jax_rl/","title":"Reinforcement Learning (jax)","text":""},{"location":"examples/jax_rl/#reinforcement-learning-in-jax","title":"Reinforcement Learning in Jax","text":"

This example follows the same structure as the other examples:

  • An \"algorithm\" (in this case JaxRLExample) is trained with a \"trainer\" (JaxTrainer);

However, there are some very important differences:

  • There is no \"datamodule\". The algorithm accepts an Environment (gymnax.Environment) as input.
  • The \"Trainer\" is a JaxTrainer, instead of a lightning.Trainer.
  • The full training loop is written in Jax;
  • Some (but not all) PyTorch-Lightning callbacks can still be used with the JaxTrainer;
  • The JaxRLExample class is an algorithm based on rejax.PPO.
"},{"location":"examples/jax_rl/#jaxrlexample","title":"JaxRLExample","text":"

The JaxRLExample is based on rejax.PPO. It follows the structure of a JaxModule, and is trained with a JaxTrainer.

Click to show the code for JaxRLExample
class JaxRLExample(\n    flax.struct.PyTreeNode,\n    JaxModule[PPOState[TEnvState], TrajectoryWithLastObs, EvalMetrics],\n    Generic[TEnvState, TEnvParams],\n):\n    \"\"\"Example of an RL algorithm written in Jax: PPO, based on `rejax.PPO`.\n\n    ## Differences w.r.t. rejax.PPO:\n\n    - The state / hparams are split into different, fully-typed structs:\n        - The algorithm state is in a typed `PPOState` struct (vs an untyped,\n            dynamically-generated struct in rejax).\n        - The hyper-parameters are in a typed `PPOHParams` struct.\n        - The state variables related to the collection of data from the environment is a\n            `TrajectoryCollectionState` instead of everything being bunched up together.\n            - This makes it easier to call the `collect_episodes` function with just what it needs.\n    - The seeds for the networks and the environment data collection are separated.\n\n    The logic is exactly the same: The losses / updates are computed in the exact same way.\n    \"\"\"\n\n    env: Environment[TEnvState, TEnvParams] = flax.struct.field(pytree_node=False)\n    env_params: TEnvParams\n    actor: flax.linen.Module = flax.struct.field(pytree_node=False)\n    critic: flax.linen.Module = flax.struct.field(pytree_node=False)\n    hp: PPOHParams\n\n    @classmethod\n    def create(\n        cls,\n        env_id: str | None = None,\n        env: Environment[TEnvState, TEnvParams] | None = None,\n        env_params: TEnvParams | None = None,\n        hp: PPOHParams | None = None,\n    ) -> JaxRLExample[TEnvState, TEnvParams]:\n        from brax.envs import _envs as brax_envs\n        from rejax.compat.brax2gymnax import create_brax\n\n        # env_params: gymnax.EnvParams\n        if env_id is None:\n            assert env is not None\n            env_params = env_params or env.default_params  # type: ignore\n        elif env_id in brax_envs:\n            env, env_params = create_brax(  # type: ignore\n                env_id,\n                episode_length=1000,\n                action_repeat=1,\n                auto_reset=True,\n                batch_size=None,\n                backend=\"generalized\",\n            )\n        elif isinstance(env_id, str):\n            env, env_params = gymnax.make(env_id=env_id)  # type: ignore\n        else:\n            raise NotImplementedError(env_id)\n\n        assert env is not None\n        assert env_params is not None\n        return cls(\n            env=env,\n            env_params=env_params,\n            actor=cls.create_actor(env, env_params),\n            critic=cls.create_critic(),\n            hp=hp or PPOHParams(),\n        )\n\n    @classmethod\n    def create_networks(\n        cls,\n        env: Environment[gymnax.EnvState, TEnvParams],\n        env_params: TEnvParams,\n        config: _NetworkConfig,\n    ):\n        # Equivalent to:\n        # return rejax.PPO.create_agent(config, env, env_params)\n        return {\n            \"actor\": cls.create_actor(env, env_params, **config[\"agent_kwargs\"]),\n            \"critic\": cls.create_actor(env, env_params, **config[\"agent_kwargs\"]),\n        }\n\n    _TEnvParams = TypeVar(\"_TEnvParams\", bound=gymnax.EnvParams, covariant=True)\n    _TEnvState = TypeVar(\"_TEnvState\", bound=gymnax.EnvState, covariant=True)\n\n    @classmethod\n    def create_actor(\n        cls,\n        env: Environment[_TEnvState, _TEnvParams],\n        env_params: _TEnvParams,\n        activation: str | Callable[[jax.Array], jax.Array] = \"swish\",\n        
hidden_layer_sizes: Sequence[int] = (64, 64),\n        **actor_kwargs,\n    ) -> DiscretePolicy | GaussianPolicy:\n        activation_fn: Callable[[jax.Array], jax.Array] = (\n            getattr(flax.linen, activation) if not callable(activation) else activation\n        )\n        hidden_layer_sizes = tuple(hidden_layer_sizes)\n        action_space = env.action_space(env_params)\n\n        if isinstance(action_space, gymnax.environments.spaces.Discrete):\n            return DiscretePolicy(\n                action_space.n,\n                activation=activation_fn,\n                hidden_layer_sizes=hidden_layer_sizes,\n                **actor_kwargs,\n            )\n        assert isinstance(action_space, gymnax.environments.spaces.Box)\n        return GaussianPolicy(\n            np.prod(action_space.shape),\n            (action_space.low, action_space.high),  # type: ignore\n            activation=activation_fn,\n            hidden_layer_sizes=hidden_layer_sizes,\n            **actor_kwargs,\n        )\n\n    @classmethod\n    def create_critic(\n        cls,\n        activation: str | Callable[[jax.Array], jax.Array] = \"swish\",\n        hidden_layer_sizes: Sequence[int] = (64, 64),\n        **critic_kwargs,\n    ) -> VNetwork:\n        activation_fn: Callable[[jax.Array], jax.Array] = (\n            getattr(flax.linen, activation) if isinstance(activation, str) else activation\n        )\n        hidden_layer_sizes = tuple(hidden_layer_sizes)\n        return VNetwork(\n            hidden_layer_sizes=hidden_layer_sizes, activation=activation_fn, **critic_kwargs\n        )\n\n    def init_train_state(self, rng: chex.PRNGKey) -> PPOState[TEnvState]:\n        rng, networks_rng, env_rng = jax.random.split(rng, 3)\n\n        rng_actor, rng_critic = jax.random.split(networks_rng, 2)\n\n        obs_ph = jnp.empty([1, *self.env.observation_space(self.env_params).shape])\n\n        actor_params = self.actor.init(rng_actor, obs_ph, rng_actor)\n        critic_params = self.critic.init(rng_critic, obs_ph)\n\n        tx = optax.adam(learning_rate=self.hp.learning_rate)\n        # TODO: Why isn't the `apply_fn` not set in rejax?\n        actor_ts = TrainState.create(apply_fn=self.actor.apply, params=actor_params, tx=tx)\n        critic_ts = TrainState.create(apply_fn=self.critic.apply, params=critic_params, tx=tx)\n\n        env_rng, reset_rng = jax.random.split(env_rng)\n        obs, env_state = jax.vmap(self.env.reset, in_axes=(0, None))(\n            jax.random.split(reset_rng, self.hp.num_envs), self.env_params\n        )\n\n        collection_state = TrajectoryCollectionState(\n            last_obs=obs,\n            rms_state=RMSState.create(shape=obs_ph.shape),\n            global_step=0,\n            env_state=env_state,\n            last_done=jnp.zeros(self.hp.num_envs, dtype=bool),\n            rng=env_rng,\n        )\n\n        return PPOState(\n            actor_ts=actor_ts,\n            critic_ts=critic_ts,\n            rng=rng,\n            data_collection_state=collection_state,\n        )\n\n    # @jit\n    def training_step(self, batch_idx: int, ts: PPOState[TEnvState], batch: TrajectoryWithLastObs):\n        \"\"\"Training step in pure jax.\"\"\"\n        trajectories = batch\n\n        ts, (actor_losses, critic_losses) = jax.lax.scan(\n            functools.partial(self.ppo_update_epoch, trajectories=trajectories),\n            init=ts,\n            xs=jnp.arange(self.hp.num_epochs),  # type: ignore\n            length=self.hp.num_epochs,\n        )\n        # todo: perhaps we 
could have a callback that updates a progress bar?\n        # jax.debug.print(\"actor_losses {}: {}\", iteration, actor_losses.mean())\n        # jax.debug.print(\"critic_losses {}: {}\", iteration, critic_losses.mean())\n\n        return ts, TrainStepMetrics(actor_losses=actor_losses, critic_losses=critic_losses)\n\n    # @jit\n    def ppo_update_epoch(\n        self, ts: PPOState[TEnvState], epoch_index: int, trajectories: TrajectoryWithLastObs\n    ):\n        minibatch_rng = jax.random.fold_in(ts.rng, epoch_index)\n\n        last_val = self.critic.apply(ts.critic_ts.params, ts.data_collection_state.last_obs)\n        assert isinstance(last_val, jax.Array)\n        last_val = jnp.where(ts.data_collection_state.last_done, 0, last_val)\n        advantages, targets = calculate_gae(\n            trajectories, last_val, gamma=self.hp.gamma, gae_lambda=self.hp.gae_lambda\n        )\n        batch = AdvantageMinibatch(trajectories.trajectories, advantages, targets)\n        minibatches = shuffle_and_split(\n            batch, minibatch_rng, num_minibatches=self.hp.num_minibatches\n        )\n\n        # shuffle the data and split it into minibatches\n\n        num_steps = self.hp.num_steps\n        num_envs = self.hp.num_envs\n        num_minibatches = self.hp.num_minibatches\n        assert (num_envs * num_steps) % num_minibatches == 0\n        minibatches = shuffle_and_split(\n            batch,\n            minibatch_rng,\n            num_minibatches=num_minibatches,\n        )\n        return jax.lax.scan(self.ppo_update, ts, minibatches, length=self.hp.num_minibatches)\n\n    # @jit\n    def ppo_update(self, ts: PPOState[TEnvState], batch: AdvantageMinibatch):\n        actor_loss, actor_grads = jax.value_and_grad(actor_loss_fn)(\n            ts.actor_ts.params,\n            actor=self.actor,\n            batch=batch,\n            clip_eps=self.hp.clip_eps,\n            ent_coef=self.hp.ent_coef,\n        )\n        assert isinstance(actor_loss, jax.Array)\n        critic_loss, critic_grads = jax.value_and_grad(critic_loss_fn)(\n            ts.critic_ts.params,\n            critic=self.critic,\n            batch=batch,\n            clip_eps=self.hp.clip_eps,\n            vf_coef=self.hp.vf_coef,\n        )\n        assert isinstance(critic_loss, jax.Array)\n\n        # TODO: to log the loss here?\n        actor_ts = ts.actor_ts.apply_gradients(grads=actor_grads)\n        critic_ts = ts.critic_ts.apply_gradients(grads=critic_grads)\n\n        return ts.replace(actor_ts=actor_ts, critic_ts=critic_ts), (actor_loss, critic_loss)\n\n    def eval_callback(\n        self, ts: PPOState[TEnvState], rng: chex.PRNGKey | None = None\n    ) -> EvalMetrics:\n        if rng is None:\n            rng = ts.rng\n        actor = make_actor(ts=ts, hp=self.hp)\n        ep_lengths, cum_rewards = evaluate(\n            actor,\n            ts.rng,\n            self.env,\n            self.env_params,\n            num_seeds=self.hp.num_seeds_per_eval,\n            max_steps_in_episode=self.env_params.max_steps_in_episode,\n        )\n        return EvalMetrics(episode_length=ep_lengths, cumulative_reward=cum_rewards)\n\n    def get_batch(\n        self, ts: PPOState[TEnvState], batch_idx: int\n    ) -> tuple[PPOState[TEnvState], TrajectoryWithLastObs]:\n        data_collection_state, trajectories = self.collect_trajectories(\n            ts.data_collection_state,\n            actor_params=ts.actor_ts.params,\n            critic_params=ts.critic_ts.params,\n        )\n        ts = 
ts.replace(data_collection_state=data_collection_state)\n        return ts, trajectories\n\n    # @jit\n    def collect_trajectories(\n        self,\n        collection_state: TrajectoryCollectionState[TEnvState],\n        actor_params: FrozenVariableDict,\n        critic_params: FrozenVariableDict,\n    ):\n        env_step_fn = functools.partial(\n            self.env_step,\n            # env=self.env,\n            # env_params=self.env_params,\n            # actor=self.actor,\n            # critic=self.critic,\n            # num_envs=self.hp.num_envs,\n            actor_params=actor_params,\n            critic_params=critic_params,\n            # discrete=self.discrete,\n            # normalize_observations=self.hp.normalize_observations,\n        )\n        collection_state, trajectories = jax.lax.scan(\n            env_step_fn,\n            collection_state,\n            xs=jnp.arange(self.hp.num_steps),\n            length=self.hp.num_steps,\n        )\n        trajectories_with_last = TrajectoryWithLastObs(\n            trajectories=trajectories,\n            last_done=collection_state.last_done,\n            last_obs=collection_state.last_obs,\n        )\n        return collection_state, trajectories_with_last\n\n    # @jit\n    def env_step(\n        self,\n        collection_state: TrajectoryCollectionState[TEnvState],\n        step_index: jax.Array,\n        actor_params: FrozenVariableDict,\n        critic_params: FrozenVariableDict,\n    ):\n        # Get keys for sampling action and stepping environment\n        # doing it this way to try to get *exactly* the same rngs as in rejax.PPO.\n        rng, new_rngs = jax.random.split(collection_state.rng, 2)\n        rng_steps, rng_action = jax.random.split(new_rngs, 2)\n        rng_steps = jax.random.split(rng_steps, self.hp.num_envs)\n\n        # Sample action\n        unclipped_action, log_prob = self.actor.apply(\n            actor_params, collection_state.last_obs, rng_action, method=\"action_log_prob\"\n        )\n        assert isinstance(log_prob, jax.Array)\n        value = self.critic.apply(critic_params, collection_state.last_obs)\n        assert isinstance(value, jax.Array)\n\n        # Clip action\n        if self.discrete:\n            action = unclipped_action\n        else:\n            low = self.env.action_space(self.env_params).low\n            high = self.env.action_space(self.env_params).high\n            action = jnp.clip(unclipped_action, low, high)\n\n        # Step environment\n        next_obs, env_state, reward, done, _ = jax.vmap(self.env.step, in_axes=(0, 0, 0, None))(\n            rng_steps,\n            collection_state.env_state,\n            action,\n            self.env_params,\n        )\n\n        if self.hp.normalize_observations:\n            # rms_state, next_obs = learner.update_and_normalize(collection_state.rms_state, next_obs)\n            rms_state = _update_rms(collection_state.rms_state, obs=next_obs, batched=True)\n            next_obs = _normalize_obs(rms_state, obs=next_obs)\n\n            collection_state = collection_state.replace(rms_state=rms_state)\n\n        # Return updated runner state and transition\n        transition = Trajectory(\n            collection_state.last_obs, unclipped_action, log_prob, reward, value, done\n        )\n        collection_state = collection_state.replace(\n            env_state=env_state,\n            last_obs=next_obs,\n            last_done=done,\n            global_step=collection_state.global_step + self.hp.num_envs,\n            rng=rng,\n     
   )\n        return collection_state, transition\n\n    @property\n    def discrete(self) -> bool:\n        return isinstance(\n            self.env.action_space(self.env_params), gymnax.environments.spaces.Discrete\n        )\n\n    def visualize(self, ts: PPOState, gif_path: str | Path, eval_rng: chex.PRNGKey | None = None):\n        actor = make_actor(ts=ts, hp=self.hp)\n        render_episode(\n            actor=actor,\n            env=self.env,\n            env_params=self.env_params,\n            gif_path=Path(gif_path),\n            rng=eval_rng if eval_rng is not None else ts.rng,\n        )\n\n    ## These here aren't currently used. They are here to mirror rejax.PPO where the training loop\n    # is in the algorithm.\n\n    @functools.partial(jit, static_argnames=[\"skip_initial_evaluation\"])\n    def train(\n        self,\n        rng: jax.Array,\n        train_state: PPOState[TEnvState] | None = None,\n        skip_initial_evaluation: bool = False,\n    ) -> tuple[PPOState[TEnvState], EvalMetrics]:\n        \"\"\"Full training loop in jax.\n\n        This is only here to match the API of `rejax.PPO.train`. This doesn't get called when using\n        the `JaxTrainer`, since `JaxTrainer.fit` already does the same thing, but also with support\n        for some `JaxCallback`s (as well as some `lightning.Callback`s!).\n\n        Unfolded version of `rejax.PPO.train`.\n        \"\"\"\n        if train_state is None and rng is None:\n            raise ValueError(\"Either train_state or rng must be provided\")\n\n        ts = train_state if train_state is not None else self.init_train_state(rng)\n\n        initial_evaluation: EvalMetrics | None = None\n        if not skip_initial_evaluation:\n            initial_evaluation = self.eval_callback(ts)\n\n        num_evals = np.ceil(self.hp.total_timesteps / self.hp.eval_freq).astype(int)\n        ts, evaluation = jax.lax.scan(\n            self._training_epoch,\n            init=ts,\n            xs=None,\n            length=num_evals,\n        )\n\n        if not skip_initial_evaluation:\n            assert initial_evaluation is not None\n            evaluation = jax.tree.map(\n                lambda i, ev: jnp.concatenate((jnp.expand_dims(i, 0), ev)),\n                initial_evaluation,\n                evaluation,\n            )\n            assert isinstance(evaluation, EvalMetrics)\n\n        return ts, evaluation\n\n    # @jit\n    def _training_epoch(\n        self, ts: PPOState[TEnvState], epoch: int\n    ) -> tuple[PPOState[TEnvState], EvalMetrics]:\n        # Run a few training iterations\n        iteration_steps = self.hp.num_envs * self.hp.num_steps\n        num_iterations = np.ceil(self.hp.eval_freq / iteration_steps).astype(int)\n        ts = jax.lax.fori_loop(\n            0,\n            num_iterations,\n            # drop metrics for now\n            lambda i, train_state_i: self._fused_training_step(i, train_state_i)[0],\n            ts,\n        )\n        # Run evaluation\n        return ts, self.eval_callback(ts)\n\n    # @jit\n    def _fused_training_step(self, iteration: int, ts: PPOState[TEnvState]):\n        \"\"\"Fused training step in jax (joined data collection + training).\n\n        This is the equivalent of the training step from rejax.PPO. 
It is only used in tests to\n        verify the correctness of the training step.\n        \"\"\"\n\n        data_collection_state, trajectories = self.collect_trajectories(\n            # env=self.env,\n            # env_params=self.env_params,\n            # actor=self.actor,\n            # critic=self.critic,\n            collection_state=ts.data_collection_state,\n            actor_params=ts.actor_ts.params,\n            critic_params=ts.critic_ts.params,\n            # num_envs=self.hp.num_envs,\n            # num_steps=self.hp.num_steps,\n            # discrete=discrete,\n            # normalize_observations=self.hp.normalize_observations,\n        )\n        ts = ts.replace(data_collection_state=data_collection_state)\n        return self.training_step(iteration, ts, trajectories)\n
"},{"location":"examples/jax_rl/#jaxmodule","title":"JaxModule","text":"

The JaxModule class is made to look a bit like the lightning.LightningModule class:

@runtime_checkable\nclass JaxModule(Protocol[Ts, _B, _MetricsT]):\n    \"\"\"A protocol for algorithms that can be trained by the `JaxTrainer`.\n\n    The `JaxRLExample` is an example that follows this structure and can be trained with a\n    `JaxTrainer`.\n    \"\"\"\n\n    def init_train_state(self, rng: chex.PRNGKey) -> Ts:\n        \"\"\"Create the initial training state.\"\"\"\n        raise NotImplementedError\n\n    def get_batch(self, ts: Ts, batch_idx: int) -> tuple[Ts, _B]:\n        \"\"\"Produces a batch of data.\"\"\"\n        raise NotImplementedError\n\n    def training_step(\n        self, batch_idx: int, ts: Ts, batch: _B\n    ) -> tuple[Ts, flax.struct.PyTreeNode]:\n        \"\"\"Update the training state using a \"batch\" of data.\"\"\"\n        raise NotImplementedError\n\n    def eval_callback(self, ts: Ts) -> _MetricsT:\n        \"\"\"Perform evaluation and return metrics.\"\"\"\n        raise NotImplementedError\n
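To make the protocol a bit more concrete, here is a minimal, self-contained sketch of an object that satisfies it. ToyState, ToyBatch, ToyMetrics and ToyRegression are hypothetical names used only for illustration; they are not part of the template:

```python
# Minimal, hypothetical example of an algorithm matching the JaxModule protocol.
import chex
import flax.struct
import jax
import jax.numpy as jnp


class ToyState(flax.struct.PyTreeNode):
    params: jax.Array  # a single scalar weight
    rng: chex.PRNGKey


class ToyBatch(flax.struct.PyTreeNode):
    x: jax.Array
    y: jax.Array


class ToyMetrics(flax.struct.PyTreeNode):
    loss: jax.Array


class ToyRegression(flax.struct.PyTreeNode):
    # Fits y = 3x with plain gradient descent, structured like a JaxModule.
    lr: float = 0.1

    def init_train_state(self, rng: chex.PRNGKey) -> ToyState:
        return ToyState(params=jnp.zeros(()), rng=rng)

    def get_batch(self, ts: ToyState, batch_idx: int) -> tuple[ToyState, ToyBatch]:
        rng, batch_rng = jax.random.split(ts.rng)
        x = jax.random.normal(batch_rng, (32,))
        return ts.replace(rng=rng), ToyBatch(x=x, y=3.0 * x)

    def training_step(self, batch_idx: int, ts: ToyState, batch: ToyBatch):
        def loss_fn(w):
            return jnp.mean((w * batch.x - batch.y) ** 2)

        loss, grad = jax.value_and_grad(loss_fn)(ts.params)
        return ts.replace(params=ts.params - self.lr * grad), ToyMetrics(loss=loss)

    def eval_callback(self, ts: ToyState) -> ToyMetrics:
        x = jnp.linspace(-1.0, 1.0, 16)
        return ToyMetrics(loss=jnp.mean((ts.params * x - 3.0 * x) ** 2))
```

An instance of such a class could then be passed directly to JaxTrainer.fit, described in the next section.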
"},{"location":"examples/jax_rl/#jaxtrainer","title":"JaxTrainer","text":"

The JaxTrainer follows a roughly similar structure to the lightning.Trainer:

  • JaxTrainer.fit is called with a JaxModule to train the algorithm.

Click to show the code for JaxTrainer
class JaxTrainer(flax.struct.PyTreeNode):\n    \"\"\"A simplified version of the `lightning.Trainer` with a fully jitted training loop.\n\n    ## Assumptions:\n\n    - The algo object must match the `JaxModule` protocol (in other words, it should implement its\n      methods).\n\n    ## Training loop\n\n    This is the training loop, which is fully jitted:\n\n    ```python\n    ts = algo.init_train_state(rng)\n\n    setup(\"fit\")\n    on_fit_start()\n    on_train_start()\n\n    eval_metrics = []\n    for epoch in range(self.max_epochs):\n        on_train_epoch_start()\n\n        for step in range(self.training_steps_per_epoch):\n\n            batch = algo.get_batch(ts, step)\n\n            on_train_batch_start()\n\n            ts, metrics = algo.training_step(step, ts, batch)\n\n            on_train_batch_end()\n\n        on_train_epoch_end()\n\n        # Evaluation \"loop\"\n        on_validation_epoch_start()\n        epoch_eval_metrics = self.eval_epoch(ts, epoch, algo)\n        on_validation_epoch_start()\n\n        eval_metrics.append(epoch_eval_metrics)\n\n    return ts, eval_metrics\n    ```\n\n    ## Caveats\n\n    - Some lightning callbacks can be used with this trainer and work well, but not all of them.\n    - You can either use Regular pytorch-lightning callbacks, or use `jax.vmap` on the `fit` method,\n      but not both.\n      - If you want to use [jax.vmap][] on the `fit` method, just remove the callbacks on the\n        Trainer for now.\n\n    ## TODOs / ideas\n\n    - Add a checkpoint callback with orbax-checkpoint?\n    \"\"\"\n\n    max_epochs: int = flax.struct.field(pytree_node=False)\n\n    training_steps_per_epoch: int = flax.struct.field(pytree_node=False)\n\n    limit_val_batches: int = 0\n    limit_test_batches: int = 0\n\n    # TODO: Getting some errors with the schema generation for lightning.Callback and\n    # lightning.pytorch.loggers.logger.Logger here if we keep the type annotation.\n    callbacks: Sequence = dataclasses.field(metadata={\"pytree_node\": False}, default_factory=tuple)\n\n    logger: Any | None = flax.struct.field(pytree_node=False, default=None)\n\n    # accelerator: str = flax.struct.field(pytree_node=False, default=\"auto\")\n    # strategy: str = flax.struct.field(pytree_node=False, default=\"auto\")\n    # devices: int | str = flax.struct.field(pytree_node=False, default=\"auto\")\n\n    # path to output directory, created dynamically by hydra\n    # path generation pattern is specified in `configs/hydra/default.yaml`\n    # use it to store all files generated during the run, like checkpoints and metrics\n\n    default_root_dir: str | Path | None = flax.struct.field(\n        pytree_node=False,\n        default_factory=lambda: HydraConfig.get().runtime.output_dir,\n    )\n\n    # State variables:\n    # TODO: figure out how to cleanly store / update these.\n    current_epoch: int = flax.struct.field(pytree_node=True, default=0)\n    global_step: int = flax.struct.field(pytree_node=True, default=0)\n\n    logged_metrics: dict = flax.struct.field(pytree_node=True, default_factory=dict)\n    callback_metrics: dict = flax.struct.field(pytree_node=True, default_factory=dict)\n    # todo: get the metrics from the callbacks?\n    # lightning.pytorch.loggers.CSVLogger.log_metrics\n    # TODO: Take a look at this method:\n    # lightning.pytorch.callbacks.progress.rich_progress.RichProgressBar.get_metrics\n    # return lightning.Trainer._logger_connector.progress_bar_metrics\n    progress_bar_metrics: dict = flax.struct.field(pytree_node=True, 
default_factory=dict)\n\n    verbose: bool = flax.struct.field(pytree_node=False, default=False)\n\n    @functools.partial(jit, static_argnames=[\"skip_initial_evaluation\"])\n    def fit(\n        self,\n        algo: JaxModule[Ts, _B, _MetricsT],\n        rng: chex.PRNGKey,\n        train_state: Ts | None = None,\n        skip_initial_evaluation: bool = False,\n    ) -> tuple[Ts, _MetricsT]:\n        \"\"\"Full training loop in pure jax (a lot faster than when using pytorch-lightning).\n\n        Unfolded version of `rejax.PPO.train`.\n\n        Training loop in pure jax (a lot faster than when using pytorch-lightning).\n        \"\"\"\n\n        if train_state is None and rng is None:\n            raise ValueError(\"Either train_state or rng must be provided\")\n\n        train_state = train_state if train_state is not None else algo.init_train_state(rng)\n\n        if self.progress_bar_callback is not None:\n            if self.verbose:\n                jax.debug.print(\"Enabling the progress bar callback.\")\n            jax.experimental.io_callback(self.progress_bar_callback.enable, ())\n\n        self._callback_hook(\"setup\", self, algo, ts=train_state, partial_kwargs=dict(stage=\"fit\"))\n        self._callback_hook(\"on_fit_start\", self, algo, ts=train_state)\n        self._callback_hook(\"on_train_start\", self, algo, ts=train_state)\n\n        if self.logger:\n            jax.experimental.io_callback(\n                lambda algo: self.logger and self.logger.log_hyperparams(hparams_to_dict(algo)),\n                (),\n                algo,\n                ordered=True,\n            )\n\n        initial_evaluation: _MetricsT | None = None\n        if not skip_initial_evaluation:\n            initial_evaluation = algo.eval_callback(train_state)\n\n        # Run the epoch loop `self.max_epoch` times.\n        train_state, evaluations = jax.lax.scan(\n            functools.partial(self.epoch_loop, algo=algo),\n            init=train_state,\n            xs=jnp.arange(self.max_epochs),  # type: ignore\n            length=self.max_epochs,\n        )\n\n        if not skip_initial_evaluation:\n            assert initial_evaluation is not None\n            evaluations: _MetricsT = jax.tree.map(\n                lambda i, ev: jnp.concatenate((jnp.expand_dims(i, 0), ev)),\n                initial_evaluation,\n                evaluations,\n            )\n\n        if self.logger is not None:\n            jax.block_until_ready((train_state, evaluations))\n            # jax.debug.print(\"Saving...\")\n            jax.experimental.io_callback(\n                functools.partial(self.logger.finalize, status=\"success\"), ()\n            )\n\n        self._callback_hook(\"on_fit_end\", self, algo, ts=train_state)\n        self._callback_hook(\"on_train_end\", self, algo, ts=train_state)\n        self._callback_hook(\n            \"teardown\", self, algo, ts=train_state, partial_kwargs={\"stage\": \"fit\"}\n        )\n\n        return train_state, evaluations\n\n    # @jit\n    def epoch_loop(self, ts: Ts, epoch: int, algo: JaxModule[Ts, _B, _MetricsT]):\n        # todo: Some lightning callbacks try to get the \"trainer.current_epoch\".\n        # FIXME: Hacky: Present a trainer with a different value of `self.current_epoch` to\n        # the callbacks.\n        # chex.assert_scalar_in(epoch, 0, self.max_epochs)\n        # TODO: Can't just set current_epoch to `epoch` as `epoch` is a Traced value.\n        # todo: need to have the callback take in the actual int value.\n        # 
jax.debug.print(\"Starting epoch {epoch}\", epoch=epoch)\n\n        self = self.replace(current_epoch=epoch)  # doesn't quite work?\n        ts = self.training_epoch(ts=ts, epoch=epoch, algo=algo)\n        eval_metrics = self.eval_epoch(ts=ts, epoch=epoch, algo=algo)\n        return ts, eval_metrics\n\n    # @jit\n    def training_epoch(self, ts: Ts, epoch: int, algo: JaxModule[Ts, _B, _MetricsT]):\n        # Run a few training iterations\n        self._callback_hook(\"on_train_epoch_start\", self, algo, ts=ts)\n\n        ts = jax.lax.fori_loop(\n            0,\n            self.training_steps_per_epoch,\n            # drop training metrics for now.\n            functools.partial(self.training_step, algo=algo),\n            ts,\n        )\n\n        self._callback_hook(\"on_train_epoch_end\", self, algo, ts=ts)\n        return ts\n\n    # @jit\n    def eval_epoch(self, ts: Ts, epoch: int, algo: JaxModule[Ts, _B, _MetricsT]):\n        self._callback_hook(\"on_validation_epoch_start\", self, algo, ts=ts)\n\n        # todo: split up into eval batch and eval step?\n        eval_metrics = algo.eval_callback(ts=ts)\n\n        self._callback_hook(\"on_validation_epoch_end\", self, algo, ts=ts)\n\n        return eval_metrics\n\n    # @jit\n    def training_step(self, batch_idx: int, ts: Ts, algo: JaxModule[Ts, _B, _MetricsT]):\n        \"\"\"Training step in pure jax (joined data collection + training).\n\n        *MUCH* faster than using pytorch-lightning, but you lose the callbacks and such.\n        \"\"\"\n        # todo: rename to `get_training_batch`?\n        ts, batch = algo.get_batch(ts, batch_idx=batch_idx)\n\n        self._callback_hook(\"on_train_batch_start\", self, algo, batch, batch_idx, ts=ts)\n\n        ts, metrics = algo.training_step(batch_idx=batch_idx, ts=ts, batch=batch)\n\n        if self.logger is not None:\n            # todo: Clean this up. 
logs metrics.\n            jax.experimental.io_callback(\n                lambda metrics, batch_index: self.logger\n                and self.logger.log_metrics(\n                    jax.tree.map(lambda v: v.mean(), metrics), batch_index\n                ),\n                (),\n                dataclasses.asdict(metrics) if dataclasses.is_dataclass(metrics) else metrics,\n                batch_idx,\n            )\n\n        self._callback_hook(\"on_train_batch_end\", self, algo, metrics, batch, batch_idx, ts=ts)\n\n        return ts\n\n    ### Hooks to mimic those of lightning.Trainer\n\n    def _callback_hook(\n        self,\n        hook_name: str,\n        /,\n        *hook_args,\n        ts: Ts,\n        partial_kwargs: dict | None = None,\n        sharding: jax.sharding.SingleDeviceSharding | None = None,\n        ordered: bool = True,\n        **hook_kwargs,\n    ):\n        \"\"\"Call a hook on all callbacks.\"\"\"\n        # with jax.disable_jit():\n        for i, callback in enumerate(self.callbacks):\n            # assert hasattr(callback, hook_name)\n\n            method = getattr(callback, hook_name)\n            if partial_kwargs:\n                method = functools.partial(method, **partial_kwargs)\n            if self.verbose:\n                jax.debug.print(\n                    \"Epoch {current_epoch}/{max_epochs}: \"\n                    + f\"Calling hook {hook_name} on callback {callback}\"\n                    + \"{i}\",\n                    i=i,\n                    current_epoch=self.current_epoch,\n                    ordered=True,\n                    max_epochs=self.max_epochs,\n                )\n            jax.experimental.io_callback(\n                method,\n                (),\n                *hook_args,\n                **({\"ts\": ts} if isinstance(callback, JaxCallback) else {}),\n                **hook_kwargs,\n                sharding=sharding,\n                ordered=ordered if not isinstance(callback, JaxCallback) else False,\n            )\n\n    # Compat for RichProgressBar\n    @property\n    def is_global_zero(self) -> bool:\n        return True\n\n    @property\n    def num_training_batches(self) -> int:\n        return self.training_steps_per_epoch\n\n    @property\n    def loggers(self) -> list[lightning.pytorch.loggers.Logger]:\n        if isinstance(self.logger, list | tuple):\n            return list(self.logger)\n        if self.logger is not None:\n            return [self.logger]\n        return []\n\n    # @property\n    # def progress_bar_metrics(self) -> dict[str, float]:\n\n    #     return {}\n\n    @property\n    def progress_bar_callback(self) -> lightning.pytorch.callbacks.ProgressBar | None:\n        for c in self.callbacks:\n            if isinstance(c, lightning.pytorch.callbacks.ProgressBar):\n                return c\n        return None\n\n    @property\n    def state(self):\n        from lightning.pytorch.trainer.states import (\n            RunningStage,\n            TrainerFn,\n            TrainerState,\n            TrainerStatus,\n        )\n\n        return TrainerState(\n            fn=TrainerFn.FITTING,\n            status=TrainerStatus.RUNNING,\n            stage=RunningStage.TRAINING,\n        )\n        #     self._trainer.state.fn != \"fit\"\n        #     or self._trainer.sanity_checking\n        #     or self._trainer.progress_bar_callback.train_progress_bar_id != task.id\n        # ):\n\n    @property\n    def sanity_checking(self) -> bool:\n        from lightning.pytorch.trainer.states import 
RunningStage\n\n        return self.state.stage == RunningStage.SANITY_CHECKING\n\n    @property\n    def training(self) -> bool:\n        from lightning.pytorch.trainer.states import RunningStage\n\n        return self.state.stage == RunningStage.TRAINING\n\n    @property\n    def log_dir(self) -> Path | None:\n        # copied from lightning.Trainer\n        if len(self.loggers) > 0:\n            if not isinstance(\n                self.loggers[0],\n                lightning.pytorch.loggers.TensorBoardLogger | lightning.pytorch.loggers.CSVLogger,\n            ):\n                dirpath = self.loggers[0].save_dir\n            else:\n                dirpath = self.loggers[0].log_dir\n        else:\n            dirpath = self.default_root_dir\n        if dirpath:\n            return Path(dirpath)\n        return None\n
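As a rough usage sketch (not a complete, working run): the trainer is constructed with the static fields shown above, and fit is called with an object matching the JaxModule protocol, such as the JaxRLExample or the toy example from the previous section. The argument values here are arbitrary:

```python
# Rough usage sketch for the JaxTrainer, based on the fields and `fit` signature above.
# `algo` is assumed to be any object matching the JaxModule protocol (e.g. JaxRLExample).
import jax

trainer = JaxTrainer(
    max_epochs=10,
    training_steps_per_epoch=100,
    callbacks=(),                    # some (but not all) lightning callbacks also work here
    logger=None,
    default_root_dir='logs/debug',   # set explicitly when not running under Hydra
)
rng = jax.random.key(123)
train_state, eval_metrics = trainer.fit(algo, rng)
```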
"},{"location":"examples/llm_finetuning/","title":"Fine-tuning an LLM (\ud83e\udd17+\u26a1)","text":""},{"location":"examples/llm_finetuning/#fine-tuning-llms","title":"Fine-tuning LLMs","text":"

This example is based on this language modeling example from the HuggingFace transformers documentation.

To better understand what's going on in this example, it is a good idea to read through these tutorials first:

  • Causal language modeling simple example - HuggingFace docs
  • Fine-tune a language model - Colab Notebook

The main difference between this example and the original example from HuggingFace is that the LLMFinetuningExample is a LightningModule that is trained by a lightning.Trainer.

This also means that this example doesn't use accelerate or the HuggingFace Trainer.

"},{"location":"examples/llm_finetuning/#running-the-example","title":"Running the example","text":"
python project/main.py experiment=llm_finetuning_example\n
"},{"location":"examples/profiling/","title":"Profiling your code\ud83d\udcce","text":""},{"location":"examples/sweeps/","title":"Running sweeps","text":""},{"location":"examples/sweeps/#hyper-parameter-optimization","title":"Hyper-Parameter Optimization","text":"

Work-in-progress!

Please note that this is very much a work in progress!

This is a small example showing how Hydra and submitit make it very easy to launch lots of jobs on SLURM clusters, for example for hyper-parameter optimization (HPO).

"},{"location":"examples/sweeps/#hyper-parameter-optimization-with-the-orion-hydra-sweeper","title":"Hyper-Parameter Optimization with the Orion Hydra Sweeper","text":"

Here is a configuration file that you can use to launch a hyper-parameter optimization (HPO) sweep:

Click to show the yaml config file
# @package _global_\ndefaults:\n  - example.yaml # A configuration for a single run (that works!)\n  - override /hydra/sweeper: orion # Select the orion sweeper plugin\n\nlog_level: DEBUG\nname: \"local-sweep-example\"\nseed: 123\n\nalgorithm:\n  optimizer:\n    # This here will get overwritten by the sweeper.\n    lr: 0.002\n\ntrainer:\n  accelerator: auto\n  devices: 1\n  max_epochs: 1\n  logger:\n    wandb:\n      _target_: lightning.pytorch.loggers.wandb.WandbLogger\n      project: \"ResearchTemplate\"\n      # TODO: Use the Orion trial name?\n      # name: ${oc.env:SLURM_JOB_ID}_${oc.env:SLURM_ARRAY_TASK_ID,0}_${oc.env:SLURM_PROCID}\n      save_dir: \"${hydra:runtime.output_dir}\"\n      offline: False # set True to store all logs only locally\n      # id: ${oc.env:SLURM_JOB_ID}_${oc.env:SLURM_ARRAY_TASK_ID,0}_${oc.env:SLURM_PROCID} # pass correct id to resume experiment!\n      # entity: \"\"  # set to name of your wandb team\n      log_model: False\n      prefix: \"\"\n      job_type: \"train\"\n      group: [\"${name}\"]\n      tags: [\"${name}\"]\n\nhydra:\n  mode: MULTIRUN\n  run:\n    # output directory, generated dynamically on each run\n    dir: logs/${name}/runs\n  sweep:\n    dir: logs/${name}/multiruns/\n    # subdir: ${hydra.job.num}\n    subdir: ${hydra.job.id}/task${hydra.job.num}\n\n  sweeper:\n    params:\n      algorithm:\n        optimizer:\n          lr: \"loguniform(1e-6, 1.0, default_value=3e-4)\"\n          # weight_decay: \"loguniform(1e-6, 1e-2, default_value=0)\"\n\n    experiment:\n      name: \"${name}\"\n      version: 1\n\n    algorithm:\n      type: tpe\n      config:\n        seed: 1\n\n    worker:\n      n_workers: 1\n      max_broken: 10000\n      max_trials: 10\n\n    storage:\n      type: legacy\n      use_hydra_path: false\n      database:\n        type: pickleddb\n        host: \"logs/${name}/multiruns/database.pkl\"\n    parametrization: null\n

You can use it like so:

python project/main.py experiment=local_sweep_example\n
"},{"location":"examples/sweeps/#hyper-parameter-optimization-on-a-slurm-cluster","title":"Hyper-Parameter Optimization on a SLURM cluster","text":"Click to show the yaml config file
# @package _global_\n\n# This is an \"experiment\" config, that groups together other configs into a ready-to-run example.\n\ndefaults:\n  - example.yaml # A configuration for a single run (that works!)\n  - override /trainer/logger: wandb\n  - override /hydra/sweeper: orion\n  - override /resources: gpu\n  - override /cluster: ??? # use `current` if you are already on a cluster, otherwise use one of the `cluster` configs.\n\nlog_level: DEBUG\nname: \"sweep-example\"\n\n# Set the seed to be the SLURM_PROCID, so that if we run more than one task per GPU, we get\n# TODO: This should technically be something like the \"run_id\", which would be different than SLURM_PROCID when using >1 gpus per \"run\".\nseed: ${oc.env:SLURM_PROCID,123}\n\nalgorithm:\n  optimizer:\n    # This here will get overwritten by the sweeper.\n    lr: 0.002\n\ntrainer:\n  accelerator: gpu\n  devices: 1\n  max_epochs: 1\n  logger:\n    wandb:\n      project: \"ResearchTemplate\"\n      # TODO: Use the Orion trial name?\n      name: ${oc.env:SLURM_JOB_ID}_${oc.env:SLURM_ARRAY_TASK_ID,0}_${oc.env:SLURM_PROCID}\n      save_dir: \"${hydra:runtime.output_dir}\"\n      offline: False # set True to store all logs only locally\n      id: ${oc.env:SLURM_JOB_ID}_${oc.env:SLURM_ARRAY_TASK_ID,0}_${oc.env:SLURM_PROCID} # pass correct id to resume experiment!\n      # entity: \"\"  # set to name of your wandb team\n      log_model: False\n      prefix: \"\"\n      job_type: \"train\"\n      group: ${oc.env:SLURM_JOB_ID}\n      # tags: [\"${name}\"]\n\nhydra:\n  mode: MULTIRUN\n  # TODO: Make it so running the same command twice in the same job id resumes from the last checkpoint.\n  run:\n    # output directory, generated dynamically on each run\n    dir: logs/${name}/runs\n  sweep:\n    dir: logs/${name}/multiruns/\n    # subdir: ${hydra.job.num}\n    subdir: ${hydra.job.id}/task${oc.env:SLURM_PROCID,0}\n\n  launcher:\n    # todo: bump this up.\n    array_parallelism: 5 # max num of jobs to run in parallel\n    additional_parameters:\n      time: 0-00:10:00 # maximum wall time allocated for the job (D-HH:MM:SS)\n      # TODO: Pack more than one job on a single GPU, and support this with both a\n      # patched submitit launcher as well as our remote submitit launcher, as well as by patching the\n      # orion sweeper to not drop these other results.\n      # ntasks_per_gpu: 1\n  sweeper:\n    params:\n      algorithm:\n        optimizer:\n          lr: \"loguniform(1e-6, 1.0, default_value=3e-4)\"\n          # weight_decay: \"loguniform(1e-6, 1e-2, default_value=0)\"\n      # todo: setup a fidelity parameter. Seems to not be working right now.\n      # trainer:\n      #   # Let the HPO algorithm allocate more epochs to more promising HP configurations.\n      #   max_epochs: \"fidelity(1, 10, default_value=1)\"\n\n    parametrization: null\n    experiment:\n      name: \"${name}\"\n      version: 1\n\n    algorithm:\n      #  BUG: Getting a weird bug with TPE: KeyError in `dum_below_trials = [...]` at line 397.\n      type: tpe\n      config:\n        seed: 1\n\n    worker:\n      n_workers: ${hydra.launcher.array_parallelism}\n      max_broken: 10000\n      max_trials: 10\n\n    storage:\n      type: legacy\n      use_hydra_path: false\n      database:\n        type: pickleddb\n        host: \"logs/${name}/multiruns/database.pkl\"\n

Here's how you can easily launch a sweep remotely on the Mila cluster. If you are already on a SLURM cluster, use the \"cluster=current\" config.

python project/main.py experiment=cluster_sweep_example cluster=mila\n
"},{"location":"examples/text_classification/","title":"Text Classification (\ud83e\udd17+\u26a1)","text":""},{"location":"examples/text_classification/#text-classification","title":"Text Classification (\u26a1 + \ud83e\udd17)","text":""},{"location":"examples/text_classification/#overview","title":"Overview","text":"

The TextClassifier is a LightningModule for a simple text classification task.

It accepts a TextClassificationDataModule as input, along with a network.

Click to show the code of the lightningmodule
class TextClassifier(LightningModule):\n    \"\"\"Example of a lightning module used to train a huggingface model for text classification.\"\"\"\n\n    def __init__(\n        self,\n        datamodule: TextClassificationDataModule,\n        network: HydraConfigFor[PreTrainedModel],\n        hf_metric_name: str,\n        learning_rate: float = 2e-5,\n        adam_epsilon: float = 1e-8,\n        warmup_steps: int = 0,\n        weight_decay: float = 0.0,\n        init_seed: int = 42,\n    ):\n        super().__init__()\n        self.network_config = network\n        self.num_labels = datamodule.num_classes\n        self.task_name = datamodule.task_name\n        self.init_seed = init_seed\n        self.hf_metric_name = hf_metric_name\n        self.learning_rate = learning_rate\n        self.adam_epsilon = adam_epsilon\n        self.warmup_steps = warmup_steps\n        self.weight_decay = weight_decay\n\n        self.metric = evaluate.load(\n            self.hf_metric_name,\n            self.task_name,\n            # todo: replace with hydra job id perhaps?\n            experiment_id=datetime.now().strftime(\"%d-%m-%Y_%H-%M-%S\"),\n        )\n\n        self.save_hyperparameters(ignore=[\"datamodule\"])\n\n    def configure_model(self) -> None:\n        with torch.random.fork_rng(devices=[self.device]):\n            # deterministic weight initialization\n            torch.manual_seed(self.init_seed)\n            self.network = hydra_zen.instantiate(self.network_config)\n\n        return super().configure_model()\n\n    def forward(self, inputs: dict[str, torch.Tensor]) -> BaseModelOutput:\n        return self.network(**inputs)\n\n    def shared_step(self, batch: dict[str, torch.Tensor], batch_idx: int, stage: str):\n        outputs: CausalLMOutput | SequenceClassifierOutput = self(batch)\n        loss = outputs.loss\n        assert isinstance(loss, torch.Tensor), loss\n        # todo: log the output of the metric.\n        self.log(f\"{stage}/loss\", loss, prog_bar=True)\n        if isinstance(outputs, SequenceClassifierOutput):\n            metric_value = self.metric.compute(\n                # logits=outputs.logits,\n                predictions=outputs.logits.argmax(-1),\n                references=batch[\"labels\"],\n            )\n            assert isinstance(metric_value, dict)\n            for k, v in metric_value.items():\n                self.log(\n                    f\"{stage}/{k}\",\n                    v,\n                    prog_bar=True,\n                )\n        return loss\n\n    def training_step(self, batch: dict[str, torch.Tensor], batch_idx: int):\n        return self.shared_step(batch, batch_idx, \"train\")\n\n    def validation_step(\n        self, batch: dict[str, torch.Tensor], batch_idx: int, dataloader_idx: int = 0\n    ):\n        return self.shared_step(batch, batch_idx, \"val\")\n\n    def configure_optimizers(self):\n        \"\"\"Prepare optimizer and schedule (linear warmup and decay)\"\"\"\n        model = self.network\n        no_decay = [\"bias\", \"LayerNorm.weight\"]\n        optimizer_grouped_parameters = [\n            {\n                \"params\": [\n                    p\n                    for n, p in model.named_parameters()\n                    if not any(nd_param in n for nd_param in no_decay)\n                ],\n                \"weight_decay\": self.weight_decay,\n            },\n            {\n                \"params\": [\n                    p\n                    for n, p in model.named_parameters()\n                    if any(nd_param in 
n for nd_param in no_decay)\n                ],\n                \"weight_decay\": 0.0,\n            },\n        ]\n        optimizer = AdamW(\n            optimizer_grouped_parameters,\n            lr=self.learning_rate,\n            eps=self.adam_epsilon,\n        )\n\n        scheduler = get_linear_schedule_with_warmup(\n            optimizer,\n            num_warmup_steps=self.warmup_steps,\n            num_training_steps=self.trainer.estimated_stepping_batches,\n        )\n        scheduler = {\"scheduler\": scheduler, \"interval\": \"step\", \"frequency\": 1}\n        return [optimizer], [scheduler]\n
"},{"location":"examples/text_classification/#config-files","title":"Config files","text":""},{"location":"examples/text_classification/#algorithm-config","title":"Algorithm config","text":"Click to show the Algorithm config

Source: project/configs/algorithm/text_classifier.yaml

# Config for the Text classification example algorithm\n_target_: project.algorithms.text_classifier.TextClassifier\n_recursive_: false\nnetwork:\n  _target_: transformers.models.auto.modeling_auto.AutoModelForSequenceClassification.from_pretrained\n  pretrained_model_name_or_path: albert-base-v2\n\n# NOTE: Why _partial_? Because the config doesn't create the algo directly, it creates a function\n# that will accept the datamodule and network and return the algo.\n_partial_: true\nhf_metric_name: glue\n
"},{"location":"examples/text_classification/#datamodule-config","title":"Datamodule config","text":"Click to show the Datamodule config

Source: project/configs/datamodule/glue_cola.yaml

_target_: project.datamodules.text.TextClassificationDataModule\ndata_dir: ${oc.env:SCRATCH,.}/data\nhf_dataset_path: glue\ntask_name: cola\ntext_fields:\n  - \"sentence\"\ntokenizer:\n  _target_: transformers.models.auto.tokenization_auto.AutoTokenizer.from_pretrained\n  use_fast: true\n  # Note: We could interpolate this value with `${/algorithm/network/pretrained_model_name_or_path}`\n  # to avoid duplicating a value, but this also makes it harder to use this by itself or with\n  # another algorithm.\n  pretrained_model_name_or_path: albert-base-v2\n  cache_dir: ${..data_dir}\n  trust_remote_code: true\nnum_classes: 2\nmax_seq_length: 128\ntrain_batch_size: 32\neval_batch_size: 32\n
"},{"location":"examples/text_classification/#running-the-example","title":"Running the example","text":"

Here is a configuration file that you can use to launch a simple experiment:

Click to show the yaml config file

Source: project/configs/experiment/text_classification_example.yaml

# @package _global_\ndefaults:\n  - override /algorithm: text_classifier\n  - override /datamodule: glue_cola\n  - override /trainer/callbacks: none\n\ntrainer:\n  min_epochs: 1\n  max_epochs: 2\n  limit_train_batches: 2\n  limit_val_batches: 1\n  num_sanity_val_steps: 0\n  enable_checkpointing: False\n

You can use it like so:

python project/main.py experiment=text_classification_example\n
"},{"location":"features/","title":"Features \ud83d\udd25","text":""},{"location":"features/#features-unique-to-this-project-template","title":"Features unique to this project template","text":"

Here are some cool features that are unique to this particular template:

  • Support for both Jax and Torch with PyTorch-Lightning (See the Jax example)
  • Your Hydra configs will have an Auto-Generated YAML schemas \ud83d\udd25
  • A comprehensive suite of automated tests for new algorithms, datasets and networks
    • \ud83e\udd16 Thoroughly tested on the Mila cluster directly with GitHub CI
    • Automated testing on the DRAC clusters will also be added soon.
  • Easy development inside a devcontainer with VsCode
  • Tailor-made for ML researchers who run their jobs on SLURM clusters (with default configurations for the Mila and DRAC clusters).
  • Rich typing of all parts of the source code

This template is aimed at ML researchers who run their jobs on SLURM clusters. The primary target audience is researchers and students at Mila, but the template should still be useful to others outside Mila who use PyTorch-Lightning and Hydra.

"},{"location":"features/auto_schema/","title":"Magic Config Schemas","text":""},{"location":"features/auto_schema/#auto-schema-for-hydra-configs","title":"Auto Schema for Hydra Configs","text":"

\ud83d\udd25 NOTE: This is a feature that is entirely unique to this template! \ud83d\udd25

This project template comes with a really neat feature: Your Hydra config files automatically get a Schema associated with them.

This greatly improves the experience of developing a project with Hydra:

  • Saves you time by preventing errors caused by unexpected keys in your config files, or values that are of the wrong type. This can often happen after moving files or renaming a function, for example.
  • While writing a config file you get to see:
    • the list of available configuration options in a given config
    • the default value for each field
    • the documentation for each field (taken from the source code of the function!)

Here's a quick demo of what this looks like in practice:

Here we have a config that will be used to configure the lightning.Trainer class, but any config file in the project will also get a schema automatically, even if it doesn't have a \"_target_\" key directly!

"},{"location":"features/jax/","title":"Jax and Torch support with Lightning \u26a1","text":""},{"location":"features/jax/#using-jax-with-pytorch-lightning","title":"Using Jax with PyTorch-Lightning","text":"

\ud83d\udd25 NOTE: This is a feature that is entirely unique to this template! \ud83d\udd25

This template includes examples that use either Jax, PyTorch, or both! There's a table describing each example here.

You can mix and match both Jax and Torch code. For example, you can use Jax for your dataloading, your network, or the learning algorithm, all while still benefiting from the nice stuff that comes from using PyTorch-Lightning.

How does this work?

Well, we use torch-jax-interop, another package developed here at Mila \ud83d\ude0e, that allows easy interop between torch and jax code. Feel free to take a look at it if you'd like to use it as part of your own project. \ud83d\ude01
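For instance, here is a rough sketch of the pattern used in the Jax image classification example above: a flax network is initialized from a torch tensor and then wrapped so that it can be used like a regular torch module. This assumes that torch_to_jax and WrappedJaxFunction can be imported from the torch_jax_interop package (the exact import path may differ):

```python
# Rough sketch of mixing Jax and Torch, following the JaxImageClassifier pattern above.
import flax.linen
import jax
import torch
from torch_jax_interop import WrappedJaxFunction, torch_to_jax  # assumed import path


class JaxMLP(flax.linen.Module):
    @flax.linen.compact
    def __call__(self, x: jax.Array) -> jax.Array:
        x = x.reshape((x.shape[0], -1))
        x = flax.linen.Dense(128)(x)
        x = flax.linen.relu(x)
        return flax.linen.Dense(10)(x)


jax_network = JaxMLP()
example_input = torch.zeros((8, 3, 32, 32))
jax_params = jax_network.init(jax.random.key(0), torch_to_jax(example_input))

# Wrap the jitted jax forward pass so it behaves like a torch.nn.Module:
network = WrappedJaxFunction(
    jax_function=jax.jit(jax_network.apply),
    jax_params=jax_params,
    clone_params=True,
    has_aux=False,
)
logits = network(example_input)  # an ordinary torch.Tensor, usable with torch losses
loss = torch.nn.functional.cross_entropy(logits, torch.zeros(8, dtype=torch.long))
loss.backward()                  # gradients flow back into the jax parameters
```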

"},{"location":"features/jax/#using-pytorch-lightning-to-train-a-jax-network","title":"Using PyTorch-Lightning to train a Jax network","text":"

If you'd like to use Jax in your network or learning algorithm, while keeping the same style of training loop as usual, you can!

  • Use Jax for the forward / backward passes, the parameter updates, dataset preprocessing, etc.
  • Leave the training loop / callbacks / logging / checkpointing / etc to Lightning

The lightning.Trainer will not be able to tell that you're using Jax!

Take a look at this image classification example that uses a Jax network.

"},{"location":"features/jax/#end-to-end-training-in-jax-the-jaxtrainer","title":"End-to-end training in Jax: the JaxTrainer","text":"

The JaxTrainer, used in the Jax RL Example, follows a similar structure to the lightning.Trainer. However, instead of training LightningModules, it trains JaxModules, which are simplified, Jax-based look-alikes of lightning.LightningModule.

The \"algorithm\" needs to match the JaxModule protocol:

  • JaxModule.training_step: train using a batch of data

"},{"location":"features/remote_slurm_launcher/","title":"Launching Jobs on Remote Clusters","text":""},{"location":"features/remote_slurm_launcher/#remote-slurm-submitit-launcher","title":"Remote Slurm Submitit Launcher","text":"

\ud83d\udd25 NOTE: This is a feature that is entirely unique to this template! \ud83d\udd25

This template includes a custom submitit launcher that can be used to launch jobs on remote SLURM clusters. This allows you to develop code locally and easily ship it to a different cluster. The only prerequisite is that you have SSH access to the remote cluster.

Under the hood, this uses a custom remote-slurm-executor submitit plugin.

This feature allows you to launch jobs on remote slurm clusters using two config groups:

  • The resources config group is used to select the job resources:
    • cpu: CPU job
    • gpu: GPU job
  • The cluster config group controls where to run the job:
    • current: Run on the current cluster. Use this if you're already on a SLURM cluster (e.g. when using mila code). This uses the usual submitit_slurm launcher.
    • mila: Launches the job on the Mila cluster.
    • narval: Remotely launches the job on the Narval cluster
    • cedar: Remotely launches the job on the Cedar cluster
    • beluga: Remotely launches the job on the Beluga cluster
"},{"location":"features/remote_slurm_launcher/#examples","title":"Examples","text":"

This assumes that you've already setup SSH access to the clusters (for example using mila init).

"},{"location":"features/remote_slurm_launcher/#local-machine-mila","title":"Local machine -> Mila","text":"
python project/main.py experiment=example resources=gpu cluster=mila\n
"},{"location":"features/remote_slurm_launcher/#local-machine-drac-narval","title":"Local machine -> DRAC (narval)","text":"
python project/main.py experiment=example resources=gpu cluster=narval\n
"},{"location":"features/remote_slurm_launcher/#mila-drac-narval","title":"Mila -> DRAC (narval)","text":"

This assumes that you've already setup SSH access from mila to the DRAC clusters.

Note that the command is exactly the same as above.

python project/main.py experiment=example resources=gpu cluster=narval\n

Warning

If you want to launch jobs on a remote cluster, it is (currently) necessary to place the \"resources\" config before the \"cluster\" config on the command-line.

"},{"location":"features/remote_slurm_launcher/#launching-jobs-on-the-current-slurm-cluster","title":"Launching jobs on the current SLURM cluster","text":"

If you develop on a SLURM cluster, you can use cluster=current, or simply omit the cluster config group and only use a config from the resources group.

(mila) $ python project/main.py experiment=example resources=gpu cluster=current\n
"},{"location":"features/testing/","title":"Thorough automated testing on SLURM clusters","text":""},{"location":"features/testing/#automated-testing","title":"Automated Testing","text":"

Tests are a vital part of any good codebase, especially in Machine Learning. They make it easier to explore and try out new ideas, by giving you the security that your codebase works as intended.

This template comes with some easy-to-use test suites as well as some pre-configured GitHub Actions workflows to run them:

  • Unit tests: quick to run and check small functions / modules / classes.
  • Regression tests: check that your code is reproducible and let you know if something changed while you were developing your code.
  • Integration tests: run your code end-to-end to make sure that all the individually-tested components work together as expected.
  • GitHub Actions runs all these tests before you merge your code.
"},{"location":"features/testing/#automated-testing-on-slurm-clusters-with-github-ci","title":"Automated testing on SLURM clusters with GitHub CI","text":"

\ud83d\udd25 NOTE: This is a feature that is entirely unique to this template! \ud83d\udd25

This template automatically runs all of the above-mentioned tests on an actual compute node of the Mila cluster. Assuming that you have access to the Mila, DRAC, or other SLURM clusters, all you need to do is set up a local self-hosted GitHub runner for your fork of this repository and launch it on a machine with access to a SLURM cluster, and voila: your code will now be tested on an actual SLURM cluster whenever you push or update a PR in your project's GitHub repository.

Detailed instructions on how to set this up in your project will be added soon.

"},{"location":"features/testing/#test-suites","title":"Test-suites","text":"

Unit testing in this template is done with pytest.

To run tests, simply use pytest on the command-line. You may want to add some useful flags like pytest -x -v. See the pytest docs for more info.

The built-in tests cover the following:

  • For each datamodule config, for each data split
    • test that the first batch is always the same
  • For each algorithm config, for all compatible network / datamodule config combinations:
    • initialization is deterministic & reproducible;
    • forward pass is deterministic & reproducible;
    • backward pass is deterministic & reproducible;

Take a look at project.algorithms.testsuites.lightning_module_tests to see the included base tests for algorithms.

If you use Visual Studio Code, you may want to look into adding the \"test explorer\" tab to your editor. Then, you'll be able to see and debug the tests using the GUI.

"},{"location":"features/testing/#unit-tests","title":"Unit tests","text":"
pytest -x -v\n
"},{"location":"features/testing/#regression-tests","title":"Regression Tests","text":"

We use pytest-regressions to test that code changes don't break things.

  • --gen-missing: Use this flag when you might be missing some of the regression files (for example on the first test run).
  • --regen-all: Use this when you want to intentionally re-create the regression files. This should hopefully not be used often!
"},{"location":"features/testing/#first-run","title":"First run","text":"

On the first run, you might want to run the tests with the --gen-missing flag, like so:

pytest --gen-missing\n
"},{"location":"features/testing/#integration-tests","title":"integration-tests","text":"

To run slower integration tests, use the following:

pytest -x -v --slow\n
"},{"location":"features/testing/#continuous-integration","title":"Continuous Integration","text":""},{"location":"reference/SUMMARY/","title":"Reference \ud83e\udd13","text":"
  • project
    • algorithms
      • callbacks
        • classification_metrics
        • samples_per_second
      • image_classifier
      • image_classifier_test
      • jax_image_classifier
      • jax_image_classifier_test
      • jax_ppo
      • jax_ppo_test
      • llm_finetuning
      • llm_finetuning_test
      • no_op
      • testsuites
        • lightning_module_tests
      • text_classifier
      • text_classifier_test
    • configs
      • algorithm
        • lr_scheduler
        • network
        • optimizer
      • config
      • config_test
      • datamodule
    • conftest
    • datamodules
      • datamodules_test
      • image_classification
        • cifar10
        • fashion_mnist
        • image_classification
        • imagenet
        • inaturalist
        • inaturalist_test
        • mnist
      • text
        • text_classification
        • text_classification_test
      • vision
    • experiment
    • main
    • main_test
    • networks
      • fcnet
    • trainers
      • jax_trainer
    • utils
      • env_vars
      • hydra_utils
      • remote_launcher_plugin
      • remote_launcher_plugin_test
      • testutils
      • typing_utils
        • jax_typing_utils
        • protocols
      • utils
"},{"location":"reference/project/","title":"Project","text":"

Root module for this research project.

"},{"location":"reference/project/conftest/","title":"Conftest","text":"

Fixtures and test utilities.

This module contains PyTest fixtures that are used by tests.

"},{"location":"reference/project/conftest/#project.conftest--how-this-works","title":"How this works","text":"

Our goal here is to make sure that the way we create networks/datasets/algorithms during tests match as closely as possible how they are created normally in a real run. For example, when running python project/main.py algorithm=image_classifier.

We achieve this like so: all the components of an experiment are created using fixtures. The first fixtures to be invoked are the ones that correspond to command-line arguments.

For example, one of the fixtures which is created first is datamodule_config.

The first fixtures to be created are the datamodule_config, network_config and algorithm_config, along with the command-line overrides. From these, the experiment_dictconfig is created.

---\ntitle: Fixture dependency graph\n---\nflowchart TD\ndatamodule_config[\n    <a href=\"#project.conftest.datamodule_config\">datamodule_config</a>\n] -- 'datamodule=A' --> command_line_arguments\nalgorithm_config[\n    <a href=\"#project.conftest.algorithm_config\">algorithm_config</a>\n] -- 'algorithm=B' --> command_line_arguments\ncommand_line_overrides[\n    <a href=\"#project.conftest.command_line_overrides\">command_line_overrides</a>\n] -- 'seed=123' --> command_line_arguments\ncommand_line_arguments[\n    <a href=\"#project.conftest.command_line_arguments\">command_line_arguments</a>\n] -- load configs for 'datamodule=A algorithm=B seed=123' --> experiment_dictconfig\nexperiment_dictconfig[\n    <a href=\"#project.conftest.experiment_dictconfig\">experiment_dictconfig</a>\n] -- instantiate objects from configs --> experiment_config\nexperiment_config[\n    <a href=\"#project.conftest.experiment_config\">experiment_config</a>\n] --> datamodule & algorithm\ndatamodule[\n    <a href=\"#project.conftest.datamodule\">datamodule</a>\n] --> algorithm\nalgorithm[\n    <a href=\"#project.conftest.algorithm\">algorithm</a>\n] -- is used by --> some_test\nalgorithm & datamodule -- is used by --> some_other_test
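
As a rough illustration, here is a hypothetical test that simply requests some of these fixtures by name (the fixture names are the ones documented below; the test itself and its type hints are illustrative, not part of the template):

import lightning\n\n\ndef test_sketch_using_the_fixtures(\n    algorithm: lightning.LightningModule,\n    datamodule: lightning.LightningDataModule | None,\n):\n    # `algorithm` and `datamodule` are created by the conftest fixtures described below,\n    # in the same way they would be created in a real run of project/main.py.\n    assert isinstance(algorithm, lightning.LightningModule)\n    if datamodule is not None:\n        assert isinstance(datamodule, lightning.LightningDataModule)\n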
"},{"location":"reference/project/conftest/#project.conftest.original_datadir","title":"original_datadir","text":"
original_datadir(original_datadir: Path)\n

Overwrite the original_datadir fixture value to change where regression files are created.

By default, they are in a folder next to the source. Here instead we move them to $SCRATCH if available, or to a .regression_files folder at the root of the repo otherwise.
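
A minimal sketch of what such a fixture override could look like (illustrative only; the actual fixture in conftest.py may differ):

import os\nfrom pathlib import Path\n\nimport pytest\n\n\n@pytest.fixture\ndef original_datadir(original_datadir: Path) -> Path:\n    # Sketch: put regression files under $SCRATCH when it is set,\n    # otherwise under a .regression_files folder in the repo.\n    scratch = os.environ.get(\"SCRATCH\")\n    root = Path(scratch) if scratch else Path(\".regression_files\")\n    return root / original_datadir.name\n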

"},{"location":"reference/project/conftest/#project.conftest.algorithm_config","title":"algorithm_config","text":"
algorithm_config(request: FixtureRequest) -> str | None\n

The algorithm config to use in the experiment, as if algorithm=<value> was passed.

This is parametrized with all the configurations for a given algorithm type when using the included tests, for example as is done in project.algorithms.image_classifier_test.

"},{"location":"reference/project/conftest/#project.conftest.datamodule_config","title":"datamodule_config","text":"
datamodule_config(request: FixtureRequest) -> str | None\n

The datamodule config to use in the experiment, as if datamodule=<value> was passed.

"},{"location":"reference/project/conftest/#project.conftest.algorithm_network_config","title":"algorithm_network_config","text":"
algorithm_network_config(\n    request: FixtureRequest,\n) -> str | None\n

The network config to use in the experiment, as in algorithm/network=<value>.

"},{"location":"reference/project/conftest/#project.conftest.command_line_arguments","title":"command_line_arguments","text":"
command_line_arguments(\n    algorithm_config: str | None,\n    datamodule_config: str | None,\n    algorithm_network_config: str | None,\n    command_line_overrides: tuple[str, ...],\n    request: FixtureRequest,\n)\n

Fixture that returns the command-line arguments that will be passed to Hydra to run the experiment.

The algorithm_config, network_config and datamodule_config values here are parametrized indirectly by most tests using the project.utils.testutils.run_for_all_configs_of_type function so that the respective components are created in the same way as they would be by Hydra in a regular run.

"},{"location":"reference/project/conftest/#project.conftest.experiment_dictconfig","title":"experiment_dictconfig","text":"
experiment_dictconfig(\n    command_line_arguments: tuple[str, ...],\n    tmp_path_factory: TempPathFactory,\n) -> DictConfig\n

The omegaconf.DictConfig that is created by Hydra from the command-line arguments.

Any interpolations in the configs will not have been resolved at this point.

"},{"location":"reference/project/conftest/#project.conftest.experiment_config","title":"experiment_config","text":"
experiment_config(\n    experiment_dictconfig: DictConfig,\n) -> Config\n

The experiment configuration, with all interpolations resolved.

"},{"location":"reference/project/conftest/#project.conftest.datamodule","title":"datamodule","text":"
datamodule(\n    experiment_dictconfig: DictConfig,\n) -> LightningDataModule | None\n

Fixture that creates the datamodule for the given config.

"},{"location":"reference/project/conftest/#project.conftest.algorithm","title":"algorithm","text":"
algorithm(\n    experiment_config: Config,\n    datamodule: LightningDataModule | None,\n    trainer: Trainer | JaxTrainer,\n    seed: int,\n    device: device,\n)\n

Fixture that creates the \"algorithm\" (a LightningModule).

"},{"location":"reference/project/conftest/#project.conftest.seed","title":"seed","text":"
seed(\n    request: FixtureRequest, make_torch_deterministic: None\n)\n

Fixture that seeds everything for reproducibility and yields the random seed used.

"},{"location":"reference/project/conftest/#project.conftest.accelerator","title":"accelerator","text":"
accelerator(request: FixtureRequest)\n

Returns the accelerator to use during unit tests.

By default, if CUDA is available, this returns \"cuda\". If the tests are run with -vvv, the tests are also run on the CPU.

"},{"location":"reference/project/conftest/#project.conftest.devices","title":"devices","text":"
devices(\n    accelerator: str, request: FixtureRequest\n) -> Generator[\n    list[int] | int | Literal[\"auto\"], None, None\n]\n

Fixture that creates the 'devices' argument for the Trainer config.

Splits up the GPUs between pytest-xdist workers when using distributed testing. This isn't currently used in the CI.

TODO: Design dilemma here: Should we be parametrizing the devices command-line override and forcing experiments to run with this value during tests? Or should we be changing things based on this value in the config?
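
For illustration, one hypothetical way to split GPUs between pytest-xdist workers (the function below is an assumption for illustration, not the template's actual implementation):

import os\n\nimport torch\n\n\ndef gpu_for_this_xdist_worker() -> list[int]:\n    # pytest-xdist exposes the worker id (e.g. \"gw0\", \"gw1\", ...) in this environment variable.\n    worker = os.environ.get(\"PYTEST_XDIST_WORKER\", \"gw0\")\n    worker_index = int(worker.removeprefix(\"gw\") or 0)\n    num_gpus = torch.cuda.device_count()\n    # Round-robin: each worker gets a single GPU index (or no GPU at all).\n    return [worker_index % num_gpus] if num_gpus else []\n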

"},{"location":"reference/project/conftest/#project.conftest.command_line_overrides","title":"command_line_overrides","text":"
command_line_overrides(\n    request: FixtureRequest,\n) -> tuple[str, ...]\n

Fixture that makes it possible to specify command-line overrides to use in a given test.

Tests that require running an experiment should use the experiment_config fixture below.

Multiple tests using the same overrides will use the same experiment.

"},{"location":"reference/project/conftest/#project.conftest.make_torch_deterministic","title":"make_torch_deterministic","text":"
make_torch_deterministic()\n

Set torch to deterministic mode for unit tests that use the tensor_regression fixture.
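
Roughly speaking, \"deterministic mode\" involves settings like the following (a sketch of typical PyTorch flags, not necessarily the exact ones used by this fixture):

import torch\n\n# Raise an error when a non-deterministic operation is used,\n# and disable the non-deterministic cuDNN autotuner.\ntorch.use_deterministic_algorithms(True)\ntorch.backends.cudnn.benchmark = False\n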

"},{"location":"reference/project/conftest/#project.conftest.pytest_runtest_makereport","title":"pytest_runtest_makereport","text":"
pytest_runtest_makereport(item: Function, call: CallInfo)\n

Used to set up the pytest.mark.incremental mark, as described in the pytest docs.

See this page

"},{"location":"reference/project/conftest/#project.conftest.pytest_runtest_setup","title":"pytest_runtest_setup","text":"
pytest_runtest_setup(item: Function)\n

Used to set up the pytest.mark.incremental mark, as described on this page.

"},{"location":"reference/project/conftest/#project.conftest.pytest_generate_tests","title":"pytest_generate_tests","text":"
pytest_generate_tests(metafunc: Metafunc) -> None\n

Allows one to define custom parametrization schemes or extensions.

This is used to implement the parametrize_when_used mark, which allows one to parametrize an argument when it is used.

See https://docs.pytest.org/en/7.1.x/how-to/parametrize.html#how-to-parametrize-fixtures-and-test-functions

"},{"location":"reference/project/experiment/","title":"Experiment","text":"

Module containing the functions which create experiment components from Hydra configs.

This is essentially just calling hydra.utils.instantiate on the datamodule, network, trainer, and algorithm configs in a certain order.

This also adds the instance_attr custom resolver, which allows you to retrieve an attribute of an instantiated object instead of a config.

"},{"location":"reference/project/experiment/#project.experiment.instantiate_datamodule","title":"instantiate_datamodule","text":"
instantiate_datamodule(\n    datamodule_config: (\n        Builds[type[LightningDataModule]]\n        | LightningDataModule\n        | None\n    ),\n) -> LightningDataModule | None\n

Instantiate the datamodule from the configuration dict.

Any interpolations in the config will have already been resolved by the time we get here.

"},{"location":"reference/project/experiment/#project.experiment.instantiate_algorithm","title":"instantiate_algorithm","text":"
instantiate_algorithm(\n    algorithm_config: Config,\n    datamodule: LightningDataModule | None,\n) -> LightningModule | JaxModule\n

Function used to instantiate the algorithm.

It is suggested that your algorithm (LightningModule) take in the datamodule and network as arguments, to make it easier to swap out different networks and datamodules during experiments.

The instantiated datamodule and network will be passed to the algorithm's constructor.
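
For example, a hypothetical algorithm following this suggestion could look like the sketch below (the class, its names and its loss are illustrative):

import lightning\nimport torch\nfrom torch import nn\n\n\nclass MyAlgorithm(lightning.LightningModule):\n    def __init__(self, datamodule: lightning.LightningDataModule, network: nn.Module):\n        super().__init__()\n        # The instantiated datamodule and network are passed in (see instantiate_algorithm above).\n        self.datamodule = datamodule\n        self.network = network\n\n    def training_step(self, batch, batch_idx):\n        x, y = batch\n        return torch.nn.functional.cross_entropy(self.network(x), y)\n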

"},{"location":"reference/project/main/","title":"Main","text":"

Training script using Hydra.

This does the following: 1. Parses the config using Hydra; 2. Instantiates the components (trainer / algorithm), optionally datamodule and network; 3. Trains the model; 4. Optionally runs an evaluation loop.

"},{"location":"reference/project/main/#project.main.main","title":"main","text":"
main(dict_config: DictConfig) -> dict\n

Main entry point for training a model.

This does roughly the same thing as https://github.com/ashleve/lightning-hydra-template/blob/main/src/train.py

  1. Instantiates the experiment components from the Hydra configuration:
    • trainer
    • algorithm
    • datamodule (optional)
  2. Calls train to train the algorithm
  3. Calls evaluation to evaluate the model
  4. Returns the evaluation metrics.
"},{"location":"reference/project/main/#project.main.instantiate_values","title":"instantiate_values","text":"
instantiate_values(\n    config_dict: DictConfig | None,\n) -> list[Any] | None\n

Returns the list of objects instantiated from the values of this dict of configs.

This is used for the config of the trainer/logger and trainer/callbacks fields, where we can combine multiple config groups by adding entries in a dict.

For example, using trainer/logger=wandb and trainer/logger=tensorboard would result in a dict with wandb and tensorboard as keys, and the corresponding config groups as values.

This would then return a list with the instantiated WandbLogger and TensorBoardLogger objects.
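
In other words, the behaviour described above is roughly equivalent to this sketch (not the actual implementation):

from hydra.utils import instantiate\nfrom omegaconf import DictConfig\n\n\ndef instantiate_values_sketch(config_dict: DictConfig | None) -> list | None:\n    # A dict like {wandb: <logger config>, tensorboard: <logger config>}\n    # becomes a list of instantiated logger objects.\n    if not config_dict:\n        return None\n    return [instantiate(value) for value in config_dict.values()]\n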

"},{"location":"reference/project/main/#project.main.evaluate_lightningmodule","title":"evaluate_lightningmodule","text":"
evaluate_lightningmodule(\n    algorithm: LightningModule,\n    trainer: Trainer,\n    datamodule: LightningDataModule | None,\n) -> tuple[MetricName, float | None, dict]\n

Evaluates the algorithm and returns the metrics.

By default, if validation is to be performed, returns the validation error. Returns the training error when trainer.overfit_batches != 0 (e.g. when debugging or testing). Otherwise, if trainer.limit_val_batches == 0, returns the test error.
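
The rule described above can be summarized with this small illustrative sketch:

def split_used_for_the_returned_error(overfit_batches: float, limit_val_batches: float) -> str:\n    # Sketch of the rule described above, not the actual implementation.\n    if overfit_batches != 0:\n        return \"train\"\n    if limit_val_batches == 0:\n        return \"test\"\n    return \"val\"\n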

"},{"location":"reference/project/main/#project.main.get_error_from_metrics","title":"get_error_from_metrics","text":"
get_error_from_metrics(\n    metrics: _MetricsT,\n) -> tuple[str, float, dict]\n

Returns the main metric name, its value, and the full metrics dictionary.

"},{"location":"reference/project/main_test/","title":"Main test","text":""},{"location":"reference/project/main_test/#project.main_test.test_jax_can_use_the_GPU","title":"test_jax_can_use_the_GPU","text":"
test_jax_can_use_the_GPU()\n

Test that Jax can use the GPU if we have one.

"},{"location":"reference/project/main_test/#project.main_test.test_torch_can_use_the_GPU","title":"test_torch_can_use_the_GPU","text":"
test_torch_can_use_the_GPU()\n

Test that torch can use the GPU if we have one.

"},{"location":"reference/project/main_test/#project.main_test.test_setting_just_algorithm_isnt_enough","title":"test_setting_just_algorithm_isnt_enough","text":"
test_setting_just_algorithm_isnt_enough(\n    experiment_dictconfig: DictConfig,\n) -> None\n

Test to check that the datamodule is required (even when just the example algorithm is set).

TODO: We could probably move the datamodule config under algorithm/datamodule. Maybe that would be better?

"},{"location":"reference/project/main_test/#project.main_test.test_run_auto_schema_via_cli_without_errors","title":"test_run_auto_schema_via_cli_without_errors","text":"
test_run_auto_schema_via_cli_without_errors()\n

Checks that the command completes without errors.

"},{"location":"reference/project/algorithms/","title":"Algorithms","text":""},{"location":"reference/project/algorithms/#project.algorithms.ImageClassifier","title":"ImageClassifier","text":"

Bases: LightningModule

Example learning algorithm for image classification.

"},{"location":"reference/project/algorithms/#project.algorithms.ImageClassifier.__init__","title":"__init__","text":"
__init__(\n    datamodule: ImageClassificationDataModule,\n    network: HydraConfigFor[Module],\n    optimizer: HydraConfigFor[partial[Optimizer]],\n    init_seed: int = 42,\n)\n

Create a new instance of the algorithm.

Parameters:

  • datamodule (ImageClassificationDataModule, required): Object used to load train/val/test data. See the lightning docs for LightningDataModule for more info.
  • network (HydraConfigFor[Module], required): The config of the network to instantiate and train.
  • optimizer (HydraConfigFor[partial[Optimizer]], required): The config for the Optimizer. Instantiating this will return a function (a functools.partial) that will create the Optimizer given the hyper-parameters.
  • init_seed (int, default: 42): The seed to use when initializing the weights of the network.
"},{"location":"reference/project/algorithms/#project.algorithms.ImageClassifier.forward","title":"forward","text":"
forward(input: Tensor) -> Tensor\n

Forward pass of the network.

"},{"location":"reference/project/algorithms/#project.algorithms.ImageClassifier.configure_optimizers","title":"configure_optimizers","text":"
configure_optimizers()\n

Creates the optimizers.

See lightning.pytorch.core.LightningModule.configure_optimizers for more information.

"},{"location":"reference/project/algorithms/#project.algorithms.ImageClassifier.configure_callbacks","title":"configure_callbacks","text":"
configure_callbacks() -> Sequence[Callback] | Callback\n

Creates callbacks to be used by default during training.

"},{"location":"reference/project/algorithms/#project.algorithms.JaxImageClassifier","title":"JaxImageClassifier","text":"

Bases: LightningModule

Example of a learning algorithm (LightningModule) that uses Jax.

In this case, the network is a flax.linen.Module, and its forward and backward passes are written in Jax, and the loss function is in pytorch.

"},{"location":"reference/project/algorithms/#project.algorithms.JaxImageClassifier.configure_optimizers","title":"configure_optimizers","text":"
configure_optimizers()\n

Creates the optimizers.

See lightning.pytorch.core.LightningModule.configure_optimizers for more information.

"},{"location":"reference/project/algorithms/#project.algorithms.JaxRLExample","title":"JaxRLExample","text":"

Bases: PyTreeNode, JaxModule[PPOState[TEnvState], TrajectoryWithLastObs, EvalMetrics], Generic[TEnvState, TEnvParams]

Example of an RL algorithm written in Jax: PPO, based on rejax.PPO.

"},{"location":"reference/project/algorithms/#project.algorithms.JaxRLExample--differences-wrt-rejaxppo","title":"Differences w.r.t. rejax.PPO:","text":"
  • The state / hparams are split into different, fully-typed structs:
    • The algorithm state is in a typed PPOState struct (vs an untyped, dynamically-generated struct in rejax).
    • The hyper-parameters are in a typed PPOHParams struct.
    • The state variables related to the collection of data from the environment are in a TrajectoryCollectionState instead of everything being bunched up together.
      • This makes it easier to call the collect_episodes function with just what it needs.
  • The seeds for the networks and the environment data collection are separated.

The logic is exactly the same: The losses / updates are computed in the exact same way.

"},{"location":"reference/project/algorithms/#project.algorithms.JaxRLExample.training_step","title":"training_step","text":"
training_step(\n    batch_idx: int,\n    ts: PPOState[TEnvState],\n    batch: TrajectoryWithLastObs,\n)\n

Training step in pure jax.

"},{"location":"reference/project/algorithms/#project.algorithms.JaxRLExample.train","title":"train","text":"
train(\n    rng: Array,\n    train_state: PPOState[TEnvState] | None = None,\n    skip_initial_evaluation: bool = False,\n) -> tuple[PPOState[TEnvState], EvalMetrics]\n

Full training loop in jax.

This is only here to match the API of rejax.PPO.train. This doesn't get called when using the JaxTrainer, since JaxTrainer.fit already does the same thing, but also with support for some JaxCallbacks (as well as some lightning.Callbacks!).

Unfolded version of rejax.PPO.train.

"},{"location":"reference/project/algorithms/#project.algorithms.NoOp","title":"NoOp","text":"

Bases: LightningModule

Algorithm that does no learning and is used to benchmark the dataloading speed.

"},{"location":"reference/project/algorithms/#project.algorithms.TextClassifier","title":"TextClassifier","text":"

Bases: LightningModule

Example of a lightning module used to train a huggingface model for text classification.

"},{"location":"reference/project/algorithms/#project.algorithms.TextClassifier.configure_optimizers","title":"configure_optimizers","text":"
configure_optimizers()\n

Prepare optimizer and schedule (linear warmup and decay)

"},{"location":"reference/project/algorithms/image_classifier/","title":"Image classifier","text":"

Example of a simple algorithm for image classification.

This can be run from the command-line like so:

python project/main.py algorithm=image_classifier datamodule=cifar10\n
"},{"location":"reference/project/algorithms/image_classifier/#project.algorithms.image_classifier.ImageClassifier","title":"ImageClassifier","text":"

Bases: LightningModule

Example learning algorithm for image classification.

"},{"location":"reference/project/algorithms/image_classifier/#project.algorithms.image_classifier.ImageClassifier.__init__","title":"__init__","text":"
__init__(\n    datamodule: ImageClassificationDataModule,\n    network: HydraConfigFor[Module],\n    optimizer: HydraConfigFor[partial[Optimizer]],\n    init_seed: int = 42,\n)\n

Create a new instance of the algorithm.

Parameters:

  • datamodule (ImageClassificationDataModule, required): Object used to load train/val/test data. See the lightning docs for LightningDataModule for more info.
  • network (HydraConfigFor[Module], required): The config of the network to instantiate and train.
  • optimizer (HydraConfigFor[partial[Optimizer]], required): The config for the Optimizer. Instantiating this will return a function (a functools.partial) that will create the Optimizer given the hyper-parameters.
  • init_seed (int, default: 42): The seed to use when initializing the weights of the network.
"},{"location":"reference/project/algorithms/image_classifier/#project.algorithms.image_classifier.ImageClassifier.forward","title":"forward","text":"
forward(input: Tensor) -> Tensor\n

Forward pass of the network.

"},{"location":"reference/project/algorithms/image_classifier/#project.algorithms.image_classifier.ImageClassifier.configure_optimizers","title":"configure_optimizers","text":"
configure_optimizers()\n

Creates the optimizers.

See lightning.pytorch.core.LightningModule.configure_optimizers for more information.

"},{"location":"reference/project/algorithms/image_classifier/#project.algorithms.image_classifier.ImageClassifier.configure_callbacks","title":"configure_callbacks","text":"
configure_callbacks() -> Sequence[Callback] | Callback\n

Creates callbacks to be used by default during training.

"},{"location":"reference/project/algorithms/image_classifier_test/","title":"Image classifier test","text":"

Example showing how the test suite can be used to add tests for a new algorithm.

"},{"location":"reference/project/algorithms/image_classifier_test/#project.algorithms.image_classifier_test.TestImageClassifier","title":"TestImageClassifier","text":"

Bases: LightningModuleTests[ImageClassifier]

Tests for the ImageClassifier.

This runs all the tests included in the base class, with the given parametrizations:

  • algorithm_config will take the value \"image_classifier\"
    • This is because there is an image_classifier.yaml config file in project/configs/algorithms whose _target_ is the ImageClassifier.
  • datamodule_config will take these values: ['cifar10', 'fashion_mnist', 'imagenet', 'inaturalist', 'mnist']
    • These are all the configs whose target is an ImageClassificationDataModule.
  • Similarly, network_config will be parametrized by the names of all configs which produce an nn.Module, except those that would create a PreTrainedModel from HuggingFace.
    • This is currently the easiest way for us to say \"any network for image classification\".

Take a look at the LightningModuleTests class if you want to see the actual test code.
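
As a rough sketch, adding a similar test class for your own algorithm could look like the following (the decorator usage is an assumption based on the description above; check image_classifier_test.py for the exact incantation):

from project.algorithms.image_classifier import ImageClassifier\nfrom project.algorithms.testsuites.lightning_module_tests import LightningModuleTests\nfrom project.utils.testutils import run_for_all_configs_of_type\n\n\n@run_for_all_configs_of_type(\"algorithm\", ImageClassifier)\nclass TestMyImageClassifier(LightningModuleTests[ImageClassifier]):\n    \"\"\"Reuses every test from the base suite for this algorithm's configs.\"\"\"\n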

"},{"location":"reference/project/algorithms/image_classifier_test/#project.algorithms.image_classifier_test.test_example_experiment_defaults","title":"test_example_experiment_defaults","text":"
test_example_experiment_defaults(\n    experiment_config: Config,\n) -> None\n

Test to check that the datamodule is required (even when just an algorithm is set?!).

"},{"location":"reference/project/algorithms/jax_image_classifier/","title":"Jax image classifier","text":""},{"location":"reference/project/algorithms/jax_image_classifier/#project.algorithms.jax_image_classifier.JaxCNN","title":"JaxCNN","text":"

Bases: Module

A simple CNN model.

Taken from https://flax.readthedocs.io/en/latest/quick_start.html#define-network

"},{"location":"reference/project/algorithms/jax_image_classifier/#project.algorithms.jax_image_classifier.JaxImageClassifier","title":"JaxImageClassifier","text":"

Bases: LightningModule

Example of a learning algorithm (LightningModule) that uses Jax.

In this case, the network is a flax.linen.Module, and its forward and backward passes are written in Jax, and the loss function is in pytorch.

"},{"location":"reference/project/algorithms/jax_image_classifier/#project.algorithms.jax_image_classifier.JaxImageClassifier.configure_optimizers","title":"configure_optimizers","text":"
configure_optimizers()\n

Creates the optimizers.

See lightning.pytorch.core.LightningModule.configure_optimizers for more information.

"},{"location":"reference/project/algorithms/jax_image_classifier_test/","title":"Jax image classifier test","text":""},{"location":"reference/project/algorithms/jax_image_classifier_test/#project.algorithms.jax_image_classifier_test.TestJaxImageClassifier","title":"TestJaxImageClassifier","text":"

Bases: LightningModuleTests[JaxImageClassifier]

Tests for the Jax image classification algorithm.

This simply reuses all the tests in the base test suite, specifying that the datamodule passed to the JaxImageClassifier should be for image classification and the network should be a flax.linen.Module.

"},{"location":"reference/project/algorithms/jax_image_classifier_test/#project.algorithms.jax_image_classifier_test.test_demo","title":"test_demo","text":"
test_demo(tmp_path: Path)\n

Test the demo at the bottom of the module.

"},{"location":"reference/project/algorithms/jax_ppo/","title":"Jax ppo","text":"

Example of an RL algorithm (PPO) written entirely in Jax.

This is based on rejax.PPO. See the JaxRLExample class for a description of the differences w.r.t. rejax.PPO.

"},{"location":"reference/project/algorithms/jax_ppo/#project.algorithms.jax_ppo.TEnvParams","title":"TEnvParams module-attribute","text":"
TEnvParams = TypeVar(\n    \"TEnvParams\", bound=EnvParams, default=EnvParams\n)\n

Type variable for the env params (gymnax.EnvParams).

"},{"location":"reference/project/algorithms/jax_ppo/#project.algorithms.jax_ppo.Trajectory","title":"Trajectory","text":"

Bases: PyTreeNode

A sequence of interactions between an agent and an environment.

"},{"location":"reference/project/algorithms/jax_ppo/#project.algorithms.jax_ppo.TrajectoryWithLastObs","title":"TrajectoryWithLastObs","text":"

Bases: PyTreeNode

Trajectory with the last observation and whether the last step is the end of an episode.

"},{"location":"reference/project/algorithms/jax_ppo/#project.algorithms.jax_ppo.AdvantageMinibatch","title":"AdvantageMinibatch","text":"

Bases: PyTreeNode

Annotated trajectories with advantages and targets for the critic.

"},{"location":"reference/project/algorithms/jax_ppo/#project.algorithms.jax_ppo.TrajectoryCollectionState","title":"TrajectoryCollectionState","text":"

Bases: Generic[TEnvState], PyTreeNode

Struct containing the state related to the collection of data from the environment.

"},{"location":"reference/project/algorithms/jax_ppo/#project.algorithms.jax_ppo.PPOState","title":"PPOState","text":"

Bases: Generic[TEnvState], PyTreeNode

Contains all the state of the JaxRLExample algorithm.

"},{"location":"reference/project/algorithms/jax_ppo/#project.algorithms.jax_ppo.PPOHParams","title":"PPOHParams","text":"

Bases: PyTreeNode

Hyper-parameters for this PPO example.

These are taken from rejax.PPO algorithm class.

"},{"location":"reference/project/algorithms/jax_ppo/#project.algorithms.jax_ppo.JaxRLExample","title":"JaxRLExample","text":"

Bases: PyTreeNode, JaxModule[PPOState[TEnvState], TrajectoryWithLastObs, EvalMetrics], Generic[TEnvState, TEnvParams]

Example of an RL algorithm written in Jax: PPO, based on rejax.PPO.

"},{"location":"reference/project/algorithms/jax_ppo/#project.algorithms.jax_ppo.JaxRLExample--differences-wrt-rejaxppo","title":"Differences w.r.t. rejax.PPO:","text":"
  • The state / hparams are split into different, fully-typed structs:
    • The algorithm state is in a typed PPOState struct (vs an untyped, dynamically-generated struct in rejax).
    • The hyper-parameters are in a typed PPOHParams struct.
    • The state variables related to the collection of data from the environment are in a TrajectoryCollectionState instead of everything being bunched up together.
      • This makes it easier to call the collect_episodes function with just what it needs.
  • The seeds for the networks and the environment data collection are separated.

The logic is exactly the same: The losses / updates are computed in the exact same way.

"},{"location":"reference/project/algorithms/jax_ppo/#project.algorithms.jax_ppo.JaxRLExample.training_step","title":"training_step","text":"
training_step(\n    batch_idx: int,\n    ts: PPOState[TEnvState],\n    batch: TrajectoryWithLastObs,\n)\n

Training step in pure jax.

"},{"location":"reference/project/algorithms/jax_ppo/#project.algorithms.jax_ppo.JaxRLExample.train","title":"train","text":"
train(\n    rng: Array,\n    train_state: PPOState[TEnvState] | None = None,\n    skip_initial_evaluation: bool = False,\n) -> tuple[PPOState[TEnvState], EvalMetrics]\n

Full training loop in jax.

This is only here to match the API of rejax.PPO.train. This doesn't get called when using the JaxTrainer, since JaxTrainer.fit already does the same thing, but also with support for some JaxCallbacks (as well as some lightning.Callbacks!).

Unfolded version of rejax.PPO.train.

"},{"location":"reference/project/algorithms/jax_ppo_test/","title":"Jax ppo test","text":""},{"location":"reference/project/algorithms/jax_ppo_test/#project.algorithms.jax_ppo_test.PPOLightningModule","title":"PPOLightningModule","text":"

Bases: LightningModule

Uses the same code as JaxRLExample, but the training loop is run with pytorch-lightning.

This is currently only meant to be used to compare the difference between the fully-jitted training loop and Lightning.

"},{"location":"reference/project/algorithms/jax_ppo_test/#project.algorithms.jax_ppo_test.RlThroughputCallback","title":"RlThroughputCallback","text":"

Bases: MeasureSamplesPerSecondCallback

A callback to measure the throughput of RL algorithms.

"},{"location":"reference/project/algorithms/jax_ppo_test/#project.algorithms.jax_ppo_test.test_rejax","title":"test_rejax","text":"
test_rejax(\n    rng: PRNGKey,\n    results_rejax: tuple[PPO, Any, EvalMetrics],\n    tensor_regression: TensorRegressionFixture,\n    original_datadir: Path,\n    seed: int | Sequence[int],\n)\n

Train rejax.PPO with the same parameters.

"},{"location":"reference/project/algorithms/llm_finetuning/","title":"Llm finetuning","text":"

Example: fine-tuning a language model (GPT, GPT-2, CTRL, OPT, etc.) on a text dataset.

Large chunks of the code here are taken from this example script in the transformers GitHub repository.

If you haven't already, you should definitely check out this walkthrough of that script from the HuggingFace docs.

"},{"location":"reference/project/algorithms/llm_finetuning/#project.algorithms.llm_finetuning.NetworkConfig","title":"NetworkConfig","text":"

Configuration options related to the choice of network.

When instantiated by Hydra, this calls the target function passed to the decorator. In this case, this pulls the pretrained network weights from the HuggingFace model hub.

"},{"location":"reference/project/algorithms/llm_finetuning/#project.algorithms.llm_finetuning.TokenizerConfig","title":"TokenizerConfig","text":"

Configuration options for the tokenizer.

"},{"location":"reference/project/algorithms/llm_finetuning/#project.algorithms.llm_finetuning.DatasetConfig","title":"DatasetConfig dataclass","text":"

Configuration options related to the dataset preparation.

"},{"location":"reference/project/algorithms/llm_finetuning/#project.algorithms.llm_finetuning.DatasetConfig.dataset_path","title":"dataset_path instance-attribute","text":"
dataset_path: str\n

Name of the dataset \"family\"?

For example, to load \"wikitext/wikitext-103-v1\", this would be \"wikitext\".

"},{"location":"reference/project/algorithms/llm_finetuning/#project.algorithms.llm_finetuning.DatasetConfig.dataset_name","title":"dataset_name class-attribute instance-attribute","text":"
dataset_name: str | None = None\n

Name of the specific dataset?

For example, to load \"wikitext/wikitext-103-v1\", this would be \"wikitext-103-v1\".

"},{"location":"reference/project/algorithms/llm_finetuning/#project.algorithms.llm_finetuning.DatasetConfig.validation_split_percentage","title":"validation_split_percentage class-attribute instance-attribute","text":"
validation_split_percentage: int = 10\n

Percentage of the training dataset to use for validation if there isn't already a validation split.

"},{"location":"reference/project/algorithms/llm_finetuning/#project.algorithms.llm_finetuning.LLMFinetuningExample","title":"LLMFinetuningExample","text":"

Bases: LightningModule

Example of a lightning module used to fine-tune a huggingface model.

"},{"location":"reference/project/algorithms/llm_finetuning/#project.algorithms.llm_finetuning.LLMFinetuningExample.setup","title":"setup","text":"
setup(stage: str)\n

Hook from Lightning that is called at the start of training, validation and testing.

TODO: Later perhaps we could do the preprocessing in a distributed manner like this: https://discuss.huggingface.co/t/how-to-save-datasets-as-distributed-with-save-to-disk/25674/2

"},{"location":"reference/project/algorithms/llm_finetuning/#project.algorithms.llm_finetuning.LLMFinetuningExample.configure_optimizers","title":"configure_optimizers","text":"
configure_optimizers()\n

Prepare optimizer and schedule (linear warmup and decay)

"},{"location":"reference/project/algorithms/llm_finetuning_test/","title":"Llm finetuning test","text":"

Unit tests for the llm finetuning example.

"},{"location":"reference/project/algorithms/llm_finetuning_test/#project.algorithms.llm_finetuning_test.TestLLMFinetuningExample","title":"TestLLMFinetuningExample","text":"

Bases: LightningModuleTests[LLMFinetuningExample]

Tests for the LLM fine-tuning example.

"},{"location":"reference/project/algorithms/no_op/","title":"No op","text":""},{"location":"reference/project/algorithms/no_op/#project.algorithms.no_op.NoOp","title":"NoOp","text":"

Bases: LightningModule

Algorithm that does no learning and is used to benchmark the dataloading speed.

"},{"location":"reference/project/algorithms/text_classifier/","title":"Text classifier","text":""},{"location":"reference/project/algorithms/text_classifier/#project.algorithms.text_classifier.TextClassifier","title":"TextClassifier","text":"

Bases: LightningModule

Example of a lightning module used to train a huggingface model for text classification.

"},{"location":"reference/project/algorithms/text_classifier/#project.algorithms.text_classifier.TextClassifier.configure_optimizers","title":"configure_optimizers","text":"
configure_optimizers()\n

Prepare optimizer and schedule (linear warmup and decay)

"},{"location":"reference/project/algorithms/text_classifier_test/","title":"Text classifier test","text":""},{"location":"reference/project/algorithms/text_classifier_test/#project.algorithms.text_classifier_test.TestTextClassifier","title":"TestTextClassifier","text":"

Bases: LightningModuleTests[TextClassifier]

Tests for the HF example.

"},{"location":"reference/project/algorithms/text_classifier_test/#project.algorithms.text_classifier_test.TestTextClassifier.test_overfit_batch","title":"test_overfit_batch","text":"
test_overfit_batch(\n    algorithm: TextClassifier,\n    datamodule: TextClassificationDataModule,\n    tmp_path: Path,\n    num_steps: int = 3,\n)\n

Test that the loss decreases on a single batch.

"},{"location":"reference/project/algorithms/callbacks/","title":"Callbacks","text":""},{"location":"reference/project/algorithms/callbacks/#project.algorithms.callbacks.ClassificationMetricsCallback","title":"ClassificationMetricsCallback","text":"

Bases: Callback

Callback that adds classification metrics to a LightningModule.

"},{"location":"reference/project/algorithms/callbacks/classification_metrics/","title":"Classification metrics","text":""},{"location":"reference/project/algorithms/callbacks/classification_metrics/#project.algorithms.callbacks.classification_metrics.ClassificationOutputs","title":"ClassificationOutputs","text":"

Bases: TypedDict

The outputs that should minimally be returned from the training/val/test_step of classification LightningModules so that metrics can be added automatically by the ClassificationMetricsCallback.

"},{"location":"reference/project/algorithms/callbacks/classification_metrics/#project.algorithms.callbacks.classification_metrics.ClassificationOutputs.loss","title":"loss instance-attribute","text":"
loss: NotRequired[Tensor | float]\n

The loss at this step.

"},{"location":"reference/project/algorithms/callbacks/classification_metrics/#project.algorithms.callbacks.classification_metrics.ClassificationOutputs.logits","title":"logits instance-attribute","text":"
logits: Required[Tensor]\n

The un-normalized logits.

"},{"location":"reference/project/algorithms/callbacks/classification_metrics/#project.algorithms.callbacks.classification_metrics.ClassificationOutputs.y","title":"y instance-attribute","text":"
y: Required[Tensor]\n

The class labels.
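
For example, a hypothetical training_step that returns these minimal fields could look like the following sketch (only the returned keys matter here; the model and loss are illustrative):

from torch import Tensor, nn\nfrom torch.nn import functional as F\n\n\ndef training_step_sketch(network: nn.Module, batch: tuple[Tensor, Tensor]) -> dict:\n    x, y = batch\n    logits = network(x)\n    loss = F.cross_entropy(logits, y)\n    # \"logits\" and \"y\" are required, \"loss\" is optional (see above).\n    return {\"loss\": loss, \"logits\": logits, \"y\": y}\n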

"},{"location":"reference/project/algorithms/callbacks/classification_metrics/#project.algorithms.callbacks.classification_metrics.ClassificationMetricsCallback","title":"ClassificationMetricsCallback","text":"

Bases: Callback

Callback that adds classification metrics to a LightningModule.

"},{"location":"reference/project/algorithms/callbacks/samples_per_second/","title":"Samples per second","text":""},{"location":"reference/project/algorithms/testsuites/","title":"Testsuites","text":""},{"location":"reference/project/algorithms/testsuites/#project.algorithms.testsuites.LightningModuleTests","title":"LightningModuleTests","text":"

Bases: Generic[AlgorithmType], ABC

Suite of generic tests for a LightningModule.

Simply inherit from this class and decorate the class with the appropriate markers to get a set of decent unit tests that should apply to any LightningModule.

See the project.algorithms.image_classifier_test module for an example.

Other ideas:

  • pytest-benchmark for regression tests on forward / backward pass / training step speed
  • pytest-profiling for profiling the training step? (pytorch variant?)
  • Dataset splits: check some basic stats about the train/val/test inputs, are they somewhat similar?
  • Define the input as a space, check that the dataset samples are in that space and not too many samples are statistically OOD?

"},{"location":"reference/project/algorithms/testsuites/#project.algorithms.testsuites.LightningModuleTests.experiment_config","title":"experiment_config","text":"
experiment_config(\n    experiment_dictconfig: DictConfig,\n) -> Config\n

The experiment configuration, with all interpolations resolved.

"},{"location":"reference/project/algorithms/testsuites/#project.algorithms.testsuites.LightningModuleTests.algorithm","title":"algorithm","text":"
algorithm(\n    experiment_config: Config,\n    datamodule: LightningDataModule | None,\n    trainer: Trainer | JaxTrainer,\n    device: device,\n)\n

Fixture that creates the \"algorithm\" (a LightningModule).

"},{"location":"reference/project/algorithms/testsuites/#project.algorithms.testsuites.LightningModuleTests.make_torch_deterministic","title":"make_torch_deterministic","text":"
make_torch_deterministic()\n

Set torch to deterministic mode for unit tests that use the tensor_regression fixture.

"},{"location":"reference/project/algorithms/testsuites/#project.algorithms.testsuites.LightningModuleTests.seed","title":"seed","text":"
seed(request: FixtureRequest)\n

Fixture that seeds everything for reproducibility and yields the random seed used.

"},{"location":"reference/project/algorithms/testsuites/#project.algorithms.testsuites.LightningModuleTests.training_step_content","title":"training_step_content","text":"
training_step_content(\n    datamodule: LightningDataModule | None,\n    algorithm: AlgorithmType,\n    seed: int,\n    accelerator: str,\n    devices: int | list[int],\n    tmp_path_factory: TempPathFactory,\n)\n

Check that the backward pass is reproducible given the same weights, inputs and random seed.

"},{"location":"reference/project/algorithms/testsuites/#project.algorithms.testsuites.LightningModuleTests.test_initialization_is_reproducible","title":"test_initialization_is_reproducible","text":"
test_initialization_is_reproducible(\n    training_step_content: tuple[\n        AlgorithmType,\n        GetStuffFromFirstTrainingStep,\n        list[Any],\n        list[Any],\n    ],\n    tensor_regression: TensorRegressionFixture,\n    accelerator: str,\n)\n

Check that the network initialization is reproducible given the same random seed.

"},{"location":"reference/project/algorithms/testsuites/#project.algorithms.testsuites.LightningModuleTests.test_forward_pass_is_reproducible","title":"test_forward_pass_is_reproducible","text":"
test_forward_pass_is_reproducible(\n    training_step_content: tuple[\n        AlgorithmType,\n        GetStuffFromFirstTrainingStep,\n        list[Any],\n        list[Any],\n    ],\n    tensor_regression: TensorRegressionFixture,\n)\n

Check that the forward pass is reproducible given the same input and random seed.

"},{"location":"reference/project/algorithms/testsuites/#project.algorithms.testsuites.LightningModuleTests.test_backward_pass_is_reproducible","title":"test_backward_pass_is_reproducible","text":"
test_backward_pass_is_reproducible(\n    training_step_content: tuple[\n        AlgorithmType,\n        GetStuffFromFirstTrainingStep,\n        list[Any],\n        list[Any],\n    ],\n    tensor_regression: TensorRegressionFixture,\n    accelerator: str,\n)\n

Check that the backward pass is reproducible given the same weights, inputs and random seed.

"},{"location":"reference/project/algorithms/testsuites/#project.algorithms.testsuites.LightningModuleTests.forward_pass_input","title":"forward_pass_input","text":"
forward_pass_input(\n    training_batch: PyTree[Tensor], device: device\n)\n

Extracts the model input from a batch of data coming from the dataloader.

Overwrite this if your batches are not tuples of tensors (i.e. if your algorithm isn't a simple supervised learning algorithm like the example).

"},{"location":"reference/project/algorithms/testsuites/#project.algorithms.testsuites.LightningModuleTests.do_one_step_of_training","title":"do_one_step_of_training","text":"
do_one_step_of_training(\n    algorithm: AlgorithmType,\n    datamodule: LightningDataModule | None,\n    accelerator: str,\n    devices: int | list[int] | Literal[\"auto\"],\n    callbacks: list[Callback],\n    tmp_path: Path,\n)\n

Performs one step of training.

Overwrite this if you train your algorithm differently.

"},{"location":"reference/project/algorithms/testsuites/lightning_module_tests/","title":"Lightning module tests","text":"

Suite of tests for a LightningModule.

See the project.algorithms.image_classifier_test module for an example of how to use this.

"},{"location":"reference/project/algorithms/testsuites/lightning_module_tests/#project.algorithms.testsuites.lightning_module_tests.LightningModuleTests","title":"LightningModuleTests","text":"

Bases: Generic[AlgorithmType], ABC

Suite of generic tests for a LightningModule.

Simply inherit from this class and decorate the class with the appropriate markers to get a set of decent unit tests that should apply to any LightningModule.

See the project.algorithms.image_classifier_test module for an example.

Other ideas:

  • pytest-benchmark for regression tests on forward / backward pass / training step speed
  • pytest-profiling for profiling the training step? (pytorch variant?)
  • Dataset splits: check some basic stats about the train/val/test inputs, are they somewhat similar?
  • Define the input as a space, check that the dataset samples are in that space and not too many samples are statistically OOD?

"},{"location":"reference/project/algorithms/testsuites/lightning_module_tests/#project.algorithms.testsuites.lightning_module_tests.LightningModuleTests.experiment_config","title":"experiment_config","text":"
experiment_config(\n    experiment_dictconfig: DictConfig,\n) -> Config\n

The experiment configuration, with all interpolations resolved.

"},{"location":"reference/project/algorithms/testsuites/lightning_module_tests/#project.algorithms.testsuites.lightning_module_tests.LightningModuleTests.algorithm","title":"algorithm","text":"
algorithm(\n    experiment_config: Config,\n    datamodule: LightningDataModule | None,\n    trainer: Trainer | JaxTrainer,\n    device: device,\n)\n

Fixture that creates the \"algorithm\" (a LightningModule).

"},{"location":"reference/project/algorithms/testsuites/lightning_module_tests/#project.algorithms.testsuites.lightning_module_tests.LightningModuleTests.make_torch_deterministic","title":"make_torch_deterministic","text":"
make_torch_deterministic()\n

Set torch to deterministic mode for unit tests that use the tensor_regression fixture.

"},{"location":"reference/project/algorithms/testsuites/lightning_module_tests/#project.algorithms.testsuites.lightning_module_tests.LightningModuleTests.seed","title":"seed","text":"
seed(request: FixtureRequest)\n

Fixture that seeds everything for reproducibility and yields the random seed used.

"},{"location":"reference/project/algorithms/testsuites/lightning_module_tests/#project.algorithms.testsuites.lightning_module_tests.LightningModuleTests.training_step_content","title":"training_step_content","text":"
training_step_content(\n    datamodule: LightningDataModule | None,\n    algorithm: AlgorithmType,\n    seed: int,\n    accelerator: str,\n    devices: int | list[int],\n    tmp_path_factory: TempPathFactory,\n)\n

Check that the backward pass is reproducible given the same weights, inputs and random seed.

"},{"location":"reference/project/algorithms/testsuites/lightning_module_tests/#project.algorithms.testsuites.lightning_module_tests.LightningModuleTests.test_initialization_is_reproducible","title":"test_initialization_is_reproducible","text":"
test_initialization_is_reproducible(\n    training_step_content: tuple[\n        AlgorithmType,\n        GetStuffFromFirstTrainingStep,\n        list[Any],\n        list[Any],\n    ],\n    tensor_regression: TensorRegressionFixture,\n    accelerator: str,\n)\n

Check that the network initialization is reproducible given the same random seed.

"},{"location":"reference/project/algorithms/testsuites/lightning_module_tests/#project.algorithms.testsuites.lightning_module_tests.LightningModuleTests.test_forward_pass_is_reproducible","title":"test_forward_pass_is_reproducible","text":"
test_forward_pass_is_reproducible(\n    training_step_content: tuple[\n        AlgorithmType,\n        GetStuffFromFirstTrainingStep,\n        list[Any],\n        list[Any],\n    ],\n    tensor_regression: TensorRegressionFixture,\n)\n

Check that the forward pass is reproducible given the same input and random seed.

"},{"location":"reference/project/algorithms/testsuites/lightning_module_tests/#project.algorithms.testsuites.lightning_module_tests.LightningModuleTests.test_backward_pass_is_reproducible","title":"test_backward_pass_is_reproducible","text":"
test_backward_pass_is_reproducible(\n    training_step_content: tuple[\n        AlgorithmType,\n        GetStuffFromFirstTrainingStep,\n        list[Any],\n        list[Any],\n    ],\n    tensor_regression: TensorRegressionFixture,\n    accelerator: str,\n)\n

Check that the backward pass is reproducible given the same weights, inputs and random seed.

"},{"location":"reference/project/algorithms/testsuites/lightning_module_tests/#project.algorithms.testsuites.lightning_module_tests.LightningModuleTests.forward_pass_input","title":"forward_pass_input","text":"
forward_pass_input(\n    training_batch: PyTree[Tensor], device: device\n)\n

Extracts the model input from a batch of data coming from the dataloader.

Overwrite this if your batches are not tuples of tensors (i.e. if your algorithm isn't a simple supervised learning algorithm like the example).

"},{"location":"reference/project/algorithms/testsuites/lightning_module_tests/#project.algorithms.testsuites.lightning_module_tests.LightningModuleTests.do_one_step_of_training","title":"do_one_step_of_training","text":"
do_one_step_of_training(\n    algorithm: AlgorithmType,\n    datamodule: LightningDataModule | None,\n    accelerator: str,\n    devices: int | list[int] | Literal[\"auto\"],\n    callbacks: list[Callback],\n    tmp_path: Path,\n)\n

Performs one step of training.

Overwrite this if you train your algorithm differently.

"},{"location":"reference/project/algorithms/testsuites/lightning_module_tests/#project.algorithms.testsuites.lightning_module_tests.GetStuffFromFirstTrainingStep","title":"GetStuffFromFirstTrainingStep","text":"

Bases: Callback

Callback used in tests to get things from the first call to training_step.

"},{"location":"reference/project/algorithms/testsuites/lightning_module_tests/#project.algorithms.testsuites.lightning_module_tests.convert_list_and_tuples_to_dicts","title":"convert_list_and_tuples_to_dicts","text":"
convert_list_and_tuples_to_dicts(value: Any) -> Any\n

Converts all lists and tuples in a nested structure to dictionaries.

>>> convert_list_and_tuples_to_dicts([1, 2, 3])\n{'0': 1, '1': 2, '2': 3}\n>>> convert_list_and_tuples_to_dicts((1, 2, 3))\n{'0': 1, '1': 2, '2': 3}\n>>> convert_list_and_tuples_to_dicts({\"a\": [1, 2, 3], \"b\": (4, 5, 6)})\n{'a': {'0': 1, '1': 2, '2': 3}, 'b': {'0': 4, '1': 5, '2': 6}}\n

"},{"location":"reference/project/configs/","title":"Configs","text":"

All the configuration classes for the project.

"},{"location":"reference/project/configs/#project.configs.Config","title":"Config dataclass","text":"

The options required for a run. This dataclass acts as a structure for the Hydra configs.

For more info, see https://hydra.cc/docs/tutorials/structured_config/schema/

"},{"location":"reference/project/configs/#project.configs.Config.algorithm","title":"algorithm instance-attribute","text":"
algorithm: Any\n

Configuration for the algorithm (a LightningModule).

It is suggested for this class to accept a datamodule and network as arguments. The instantiated datamodule and network will be passed to the algorithm's constructor.

For more info, see the instantiate_algorithm function.

"},{"location":"reference/project/configs/#project.configs.Config.datamodule","title":"datamodule class-attribute instance-attribute","text":"
datamodule: Optional[Any] = None\n

Configuration for the datamodule (dataset + transforms + dataloader creation).

This should normally create a LightningDataModule. See the MNISTDataModule for an example.

"},{"location":"reference/project/configs/#project.configs.Config.trainer","title":"trainer class-attribute instance-attribute","text":"
trainer: dict = field(default_factory=dict)\n

Keyword arguments for the Trainer constructor.

"},{"location":"reference/project/configs/#project.configs.Config.log_level","title":"log_level class-attribute instance-attribute","text":"
log_level: str = 'info'\n

Logging level.

"},{"location":"reference/project/configs/#project.configs.Config.seed","title":"seed class-attribute instance-attribute","text":"
seed: int = field(\n    default_factory=lambda: randint(0, int(100000.0))\n)\n

Random seed for reproducibility.

If None, a random seed is generated.

"},{"location":"reference/project/configs/#project.configs.Config.ckpt_path","title":"ckpt_path class-attribute instance-attribute","text":"
ckpt_path: str | None = None\n

Path to a checkpoint to load the training state and resume the training run.

This is the same as the ckpt_path argument in the lightning.Trainer.fit method.

"},{"location":"reference/project/configs/#project.configs.add_configs_to_hydra_store","title":"add_configs_to_hydra_store","text":"
add_configs_to_hydra_store()\n

Adds all configs to the Hydra Config store.

"},{"location":"reference/project/configs/config/","title":"Config","text":""},{"location":"reference/project/configs/config/#project.configs.config.Config","title":"Config dataclass","text":"

The options required for a run. This dataclass acts as a structure for the Hydra configs.

For more info, see https://hydra.cc/docs/tutorials/structured_config/schema/

"},{"location":"reference/project/configs/config/#project.configs.config.Config.algorithm","title":"algorithm instance-attribute","text":"
algorithm: Any\n

Configuration for the algorithm (a LightningModule).

It is suggested for this class to accept a datamodule and network as arguments. The instantiated datamodule and network will be passed to the algorithm's constructor.

For more info, see the instantiate_algorithm function.

"},{"location":"reference/project/configs/config/#project.configs.config.Config.datamodule","title":"datamodule class-attribute instance-attribute","text":"
datamodule: Optional[Any] = None\n

Configuration for the datamodule (dataset + transforms + dataloader creation).

This should normally create a LightningDataModule. See the MNISTDataModule for an example.

"},{"location":"reference/project/configs/config/#project.configs.config.Config.trainer","title":"trainer class-attribute instance-attribute","text":"
trainer: dict = field(default_factory=dict)\n

Keyword arguments for the Trainer constructor.

"},{"location":"reference/project/configs/config/#project.configs.config.Config.log_level","title":"log_level class-attribute instance-attribute","text":"
log_level: str = 'info'\n

Logging level.

"},{"location":"reference/project/configs/config/#project.configs.config.Config.seed","title":"seed class-attribute instance-attribute","text":"
seed: int = field(\n    default_factory=lambda: randint(0, int(100000.0))\n)\n

Random seed for reproducibility.

If None, a random seed is generated.

"},{"location":"reference/project/configs/config/#project.configs.config.Config.ckpt_path","title":"ckpt_path class-attribute instance-attribute","text":"
ckpt_path: str | None = None\n

Path to a checkpoint to load the training state and resume the training run.

This is the same as the ckpt_path argument in the lightning.Trainer.fit method.

"},{"location":"reference/project/configs/config_test/","title":"Config test","text":"

TODO: Add tests for the configurations?

"},{"location":"reference/project/configs/config_test/#project.configs.config_test.test_can_use_algo_that_doesnt_use_a_datamodule","title":"test_can_use_algo_that_doesnt_use_a_datamodule","text":"
test_can_use_algo_that_doesnt_use_a_datamodule(\n    register_dummy_configs: None, algorithm: LightningModule\n)\n

Test that we can use an algorithm without a datamodule.

"},{"location":"reference/project/configs/algorithm/","title":"Algorithm","text":"

Configs for algorithms.

"},{"location":"reference/project/configs/algorithm/lr_scheduler/","title":"Lr scheduler","text":"

Configs for learning rate schedulers.

You can add configurations either with a config file or in code using hydra-zen.builds.
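
Here is a sketch of how a scheduler config could be registered in code, mirroring how the optimizer configs are registered (the group name \"algorithm/lr_scheduler\" and config name \"base_step_lr\" are assumptions for illustration):

import hydra_zen\nfrom torch.optim.lr_scheduler import StepLR  # type: ignore\n\nlr_scheduler_store = hydra_zen.store(group=\"algorithm/lr_scheduler\")\n\nStepLRConfig = lr_scheduler_store(\n    hydra_zen.builds(\n        StepLR,\n        zen_partial=True,\n        populate_full_signature=True,\n        zen_exclude=[\"optimizer\"],\n    ),\n    name=\"base_step_lr\",\n)\n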

"},{"location":"reference/project/configs/algorithm/network/","title":"Network","text":""},{"location":"reference/project/configs/algorithm/optimizer/","title":"Optimizer","text":"

Configurations for optimizers.

You can add configurations either with a config file or by registering structured configs in code.

Here is an example of how you could register a new configuration in code using hydra-zen.builds:

import hydra_zen\nfrom torch.optim import Adam  # type: ignore\n\noptimizers_store = hydra_zen.store(group=\"algorithm/optimizer\")\n\nAdamConfig = optimizers_store(\n    hydra_zen.builds(\n        Adam,\n        zen_partial=True,\n        populate_full_signature=True,\n        zen_exclude=[\"params\"],\n        zen_dataclass={\"cls_name\": \"AdamConfig\", \"frozen\": False},\n    ),\n    name=\"base_adam\",\n)\n

From the command-line, you can select both configs that are YAML files and structured configs (dataclasses).

This works the same way as creating config files for each optimizer under configs/algorithm/optimizer. Config files can also use structured configs in their defaults list.

"},{"location":"reference/project/configs/datamodule/","title":"Datamodule","text":""},{"location":"reference/project/datamodules/","title":"Datamodules","text":"

Datamodules (datasets + preprocessing + dataloading)

See the lightning.LightningDataModule class for more information.

"},{"location":"reference/project/datamodules/#project.datamodules.ImageClassificationDataModule","title":"ImageClassificationDataModule","text":"

Bases: VisionDataModule[ImageBatchType], ClassificationDataModule[ImageBatchType]

Lightning data modules for image classification.

"},{"location":"reference/project/datamodules/#project.datamodules.ImageClassificationDataModule.num_classes","title":"num_classes instance-attribute","text":"
num_classes: int\n

Number of classes in the dataset.

"},{"location":"reference/project/datamodules/#project.datamodules.ImageClassificationDataModule.dims","title":"dims instance-attribute","text":"
dims: tuple[C, H, W]\n

A tuple describing the shape of the data.

"},{"location":"reference/project/datamodules/#project.datamodules.CIFAR10DataModule","title":"CIFAR10DataModule","text":"

Bases: ImageClassificationDataModule

.. figure:: https://3qeqpr26caki16dnhd19sv6by6v-wpengine.netdna-ssl.com/wp-content/uploads/2019/01/Plot-of-a-Subset-of-Images-from-the-CIFAR-10-Dataset.png :width: 400 :alt: CIFAR-10

Specs
  • 10 classes (1 per class)
  • Each image is (3 x 32 x 32)

Standard CIFAR10, train, val, test splits and transforms

Transforms::

transforms = transform_lib.Compose([\n    transform_lib.ToImage(),\n    transform_lib.ToDtype(torch.float32, scale=True),\n    transform_lib.Normalize(\n        mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],\n        std=[x / 255.0 for x in [63.0, 62.1, 66.7]]\n    )\n])\n

Example::

from pl_bolts.datamodules import CIFAR10DataModule\n\ndm = CIFAR10DataModule(PATH)\nmodel = LitModel()\n\nTrainer().fit(model, datamodule=dm)\n

Or you can set your own transforms

Example::

dm.train_transforms = ...\ndm.test_transforms = ...\ndm.val_transforms  = ...\n
"},{"location":"reference/project/datamodules/#project.datamodules.FashionMNISTDataModule","title":"FashionMNISTDataModule","text":"

Bases: MNISTDataModule

.. figure:: https://storage.googleapis.com/kaggle-datasets-images/2243/3791/9384af51de8baa77f6320901f53bd26b/dataset-cover.png :width: 400 :alt: Fashion MNIST

Specs
  • 10 classes (1 per type)
  • Each image is (1 x 28 x 28)

Standard FashionMNIST, train, val, test splits and transforms

Transforms::

mnist_transforms = transform_lib.Compose([\n    transform_lib.ToTensor()\n])\n

Example::

from pl_bolts.datamodules import FashionMNISTDataModule\n\ndm = FashionMNISTDataModule('.')\nmodel = LitModel()\n\nTrainer().fit(model, datamodule=dm)\n
"},{"location":"reference/project/datamodules/#project.datamodules.ImageNetDataModule","title":"ImageNetDataModule","text":"

Bases: ImageClassificationDataModule

ImageNet datamodule.

Extracted from https://github.com/Lightning-Universe/lightning-bolts/blob/master/src/pl_bolts/datamodules/imagenet_datamodule.py - Made this a subclass of VisionDataModule

Notes:

  • train_dataloader uses the train split of imagenet2012 and puts away a portion of it for the validation split.
  • val_dataloader uses the part of the train split of imagenet2012 that was not used for training via num_imgs_per_val_class
  • test_dataloader uses the validation split of imagenet2012 for testing.
    • TODO: need to pass num_imgs_per_class=-1 for test dataset and split=\"test\".
"},{"location":"reference/project/datamodules/#project.datamodules.ImageNetDataModule.name","title":"name class-attribute instance-attribute","text":"
name: str | None = 'imagenet'\n

Dataset name.

"},{"location":"reference/project/datamodules/#project.datamodules.ImageNetDataModule.dataset_cls","title":"dataset_cls class-attribute","text":"
dataset_cls: type[VisionDataset] = ImageNet\n

Dataset class to use.

"},{"location":"reference/project/datamodules/#project.datamodules.ImageNetDataModule.dims","title":"dims class-attribute instance-attribute","text":"
dims: tuple[C, H, W] = (\n    C(3),\n    H(image_size),\n    W(image_size),\n)\n

A tuple describing the shape of the data.

"},{"location":"reference/project/datamodules/#project.datamodules.ImageNetDataModule.__init__","title":"__init__","text":"
__init__(\n    data_dir: str | Path = DATA_DIR,\n    *,\n    val_split: int | float = 0.01,\n    num_workers: int = NUM_WORKERS,\n    normalize: bool = False,\n    image_size: int = 224,\n    batch_size: int = 32,\n    seed: int = 42,\n    shuffle: bool = True,\n    pin_memory: bool = True,\n    drop_last: bool = False,\n    train_transforms: Callable | None = None,\n    val_transforms: Callable | None = None,\n    test_transforms: Callable | None = None,\n    **kwargs\n)\n

Creates an ImageNet datamodule (doesn't load or prepare the dataset yet).

Parameters:

Name Type Description Default data_dir str | Path

path to the imagenet dataset file

DATA_DIR val_split int | float

save val_split% of the training data of each class for validation.

0.01 image_size int

final image size

224 num_workers int

how many data workers

NUM_WORKERS batch_size int

batch_size

32 shuffle bool

If true shuffles the data every epoch

True pin_memory bool

If true, the data loader will copy Tensors into CUDA pinned memory before returning them

True drop_last bool

If true drops the last incomplete batch

False"},{"location":"reference/project/datamodules/#project.datamodules.ImageNetDataModule.train_transform","title":"train_transform","text":"
train_transform() -> Module\n

The standard imagenet transforms.

transforms.Compose([\n    transforms.RandomResizedCrop(self.image_size),\n    transforms.RandomHorizontalFlip(),\n    transforms.ToTensor(),\n    transforms.Normalize(\n        mean=[0.485, 0.456, 0.406],\n        std=[0.229, 0.224, 0.225]\n    ),\n])\n
"},{"location":"reference/project/datamodules/#project.datamodules.ImageNetDataModule.val_transform","title":"val_transform","text":"
val_transform() -> Compose\n

The standard imagenet transforms for validation.


transforms.Compose([\n    transforms.Resize(self.image_size + 32),\n    transforms.CenterCrop(self.image_size),\n    transforms.ToTensor(),\n    transforms.Normalize(\n        mean=[0.485, 0.456, 0.406],\n        std=[0.229, 0.224, 0.225]\n    ),\n])\n
"},{"location":"reference/project/datamodules/#project.datamodules.INaturalistDataModule","title":"INaturalistDataModule","text":"

Bases: VisionDataModule

"},{"location":"reference/project/datamodules/#project.datamodules.INaturalistDataModule.name","title":"name class-attribute instance-attribute","text":"
name: str | None = 'inaturalist'\n

Dataset name.

"},{"location":"reference/project/datamodules/#project.datamodules.INaturalistDataModule.dataset_cls","title":"dataset_cls class-attribute","text":"
dataset_cls: type[VisionDataset] = INaturalist\n

Dataset class to use.

"},{"location":"reference/project/datamodules/#project.datamodules.INaturalistDataModule.dims","title":"dims class-attribute instance-attribute","text":"
dims: tuple[C, H, W] = (C(3), H(224), W(224))\n

A tuple describing the shape of the data.

"},{"location":"reference/project/datamodules/#project.datamodules.INaturalistDataModule.default_transforms","title":"default_transforms","text":"
default_transforms() -> Callable\n

Default transform for the dataset.

"},{"location":"reference/project/datamodules/#project.datamodules.MNISTDataModule","title":"MNISTDataModule","text":"

Bases: ImageClassificationDataModule

.. figure:: https://miro.medium.com/max/744/1*AO2rIhzRYzFVQlFLx9DM9A.png :width: 400 :alt: MNIST

Specs
  • 10 classes (1 per digit)
  • Each image is (1 x 28 x 28)

Standard MNIST, train, val, test splits and transforms

Transforms::

mnist_transforms = transform_lib.Compose([\n    transform_lib.ToTensor()\n])\n

Example::

from pl_bolts.datamodules import MNISTDataModule\n\ndm = MNISTDataModule('.')\nmodel = LitModel()\n\nTrainer().fit(model, datamodule=dm)\n
"},{"location":"reference/project/datamodules/#project.datamodules.MNISTDataModule.__init__","title":"__init__","text":"
__init__(\n    data_dir: str | Path = DATA_DIR,\n    val_split: int | float = 0.2,\n    num_workers: int = 0,\n    normalize: bool = False,\n    batch_size: int = 32,\n    seed: int = 42,\n    shuffle: bool = True,\n    pin_memory: bool = True,\n    drop_last: bool = False,\n    *args: Any,\n    **kwargs: Any\n) -> None\n

Parameters:

Name Type Description Default data_dir str | Path

Where to save/load the data

DATA_DIR val_split int | float

Percent (float) or number (int) of samples to use for the validation split

0.2 num_workers int

How many workers to use for loading data

0 normalize bool

If true applies image normalize

False batch_size int

How many samples per batch to load

32 seed int

Random seed to be used for train/val/test splits

42 shuffle bool

If true shuffles the train data every epoch

True pin_memory bool

If true, the data loader will copy Tensors into CUDA pinned memory before returning them

True drop_last bool

If true drops the last incomplete batch

False"},{"location":"reference/project/datamodules/#project.datamodules.TextClassificationDataModule","title":"TextClassificationDataModule","text":"

Bases: LightningDataModule

Lightning data module for HF text classification datasets.

This is based on this tutorial: https://lightning.ai/docs/pytorch/stable/notebooks/lightning_examples/text-transformers.html

"},{"location":"reference/project/datamodules/#project.datamodules.VisionDataModule","title":"VisionDataModule","text":"

Bases: LightningDataModule, DataModule[BatchType_co]

A LightningDataModule for image datasets.

(Taken from pl_bolts which is not very well maintained.)

"},{"location":"reference/project/datamodules/#project.datamodules.VisionDataModule.name","title":"name class-attribute instance-attribute","text":"
name: str | None = ''\n

Dataset name.

"},{"location":"reference/project/datamodules/#project.datamodules.VisionDataModule.dataset_cls","title":"dataset_cls class-attribute","text":"
dataset_cls: type[VisionDataset]\n

Dataset class to use.

"},{"location":"reference/project/datamodules/#project.datamodules.VisionDataModule.dims","title":"dims instance-attribute","text":"
dims: tuple[C, H, W]\n

A tuple describing the shape of the data.

"},{"location":"reference/project/datamodules/#project.datamodules.VisionDataModule.__init__","title":"__init__","text":"
__init__(\n    data_dir: str | Path = DATA_DIR,\n    val_split: int | float = 0.2,\n    num_workers: int = NUM_WORKERS,\n    normalize: bool = False,\n    batch_size: int = 32,\n    seed: int = 42,\n    shuffle: bool = True,\n    pin_memory: bool = True,\n    drop_last: bool = False,\n    train_transforms: Callable | None = None,\n    val_transforms: Callable | None = None,\n    test_transforms: Callable | None = None,\n    **kwargs\n) -> None\n

Parameters:

Name Type Description Default data_dir str | Path

Where to save/load the data

DATA_DIR val_split int | float

Percent (float) or number (int) of samples to use for the validation split

0.2 num_workers int

How many workers to use for loading data

NUM_WORKERS normalize bool

If true applies image normalize

False batch_size int

How many samples per batch to load

32 seed int

Random seed to be used for train/val/test splits

42 shuffle bool

If true shuffles the train data every epoch

True pin_memory bool

If true, the data loader will copy Tensors into CUDA pinned memory before returning them

True drop_last bool

If true drops the last incomplete batch

False train_transforms Callable | None

transformations you can apply to train dataset

None val_transforms Callable | None

transformations you can apply to validation dataset

None test_transforms Callable | None

transformations you can apply to test dataset

None"},{"location":"reference/project/datamodules/#project.datamodules.VisionDataModule.prepare_data","title":"prepare_data","text":"
prepare_data() -> None\n

Saves files to data_dir.

"},{"location":"reference/project/datamodules/#project.datamodules.VisionDataModule.default_transforms","title":"default_transforms abstractmethod","text":"
default_transforms() -> Callable\n

Default transform for the dataset.

"},{"location":"reference/project/datamodules/#project.datamodules.VisionDataModule.train_dataloader","title":"train_dataloader","text":"
train_dataloader(\n    _dataloader_fn: Callable[\n        Concatenate[Dataset, P], DataLoader\n    ] = DataLoader,\n    *args: args,\n    **kwargs: kwargs\n) -> DataLoader\n

The train dataloader.

"},{"location":"reference/project/datamodules/#project.datamodules.VisionDataModule.val_dataloader","title":"val_dataloader","text":"
val_dataloader(\n    _dataloader_fn: Callable[\n        Concatenate[Dataset, P], DataLoader\n    ] = DataLoader,\n    *args: args,\n    **kwargs: kwargs\n) -> DataLoader\n

The val dataloader.

"},{"location":"reference/project/datamodules/#project.datamodules.VisionDataModule.test_dataloader","title":"test_dataloader","text":"
test_dataloader(\n    _dataloader_fn: Callable[\n        Concatenate[Dataset, P], DataLoader\n    ] = DataLoader,\n    *args: args,\n    **kwargs: kwargs\n) -> DataLoader\n

The test dataloader.

"},{"location":"reference/project/datamodules/datamodules_test/","title":"Datamodules test","text":""},{"location":"reference/project/datamodules/vision/","title":"Vision","text":""},{"location":"reference/project/datamodules/vision/#project.datamodules.vision.VisionDataModule","title":"VisionDataModule","text":"

Bases: LightningDataModule, DataModule[BatchType_co]

A LightningDataModule for image datasets.

(Taken from pl_bolts which is not very well maintained.)

"},{"location":"reference/project/datamodules/vision/#project.datamodules.vision.VisionDataModule.name","title":"name class-attribute instance-attribute","text":"
name: str | None = ''\n

Dataset name.

"},{"location":"reference/project/datamodules/vision/#project.datamodules.vision.VisionDataModule.dataset_cls","title":"dataset_cls class-attribute","text":"
dataset_cls: type[VisionDataset]\n

Dataset class to use.

"},{"location":"reference/project/datamodules/vision/#project.datamodules.vision.VisionDataModule.dims","title":"dims instance-attribute","text":"
dims: tuple[C, H, W]\n

A tuple describing the shape of the data.

"},{"location":"reference/project/datamodules/vision/#project.datamodules.vision.VisionDataModule.__init__","title":"__init__","text":"
__init__(\n    data_dir: str | Path = DATA_DIR,\n    val_split: int | float = 0.2,\n    num_workers: int = NUM_WORKERS,\n    normalize: bool = False,\n    batch_size: int = 32,\n    seed: int = 42,\n    shuffle: bool = True,\n    pin_memory: bool = True,\n    drop_last: bool = False,\n    train_transforms: Callable | None = None,\n    val_transforms: Callable | None = None,\n    test_transforms: Callable | None = None,\n    **kwargs\n) -> None\n

Parameters:

Name Type Description Default data_dir str | Path

Where to save/load the data

DATA_DIR val_split int | float

Percent (float) or number (int) of samples to use for the validation split

0.2 num_workers int

How many workers to use for loading data

NUM_WORKERS normalize bool

If true applies image normalize

False batch_size int

How many samples per batch to load

32 seed int

Random seed to be used for train/val/test splits

42 shuffle bool

If true shuffles the train data every epoch

True pin_memory bool

If true, the data loader will copy Tensors into CUDA pinned memory before returning them

True drop_last bool

If true drops the last incomplete batch

False train_transforms Callable | None

transformations you can apply to train dataset

None val_transforms Callable | None

transformations you can apply to validation dataset

None test_transforms Callable | None

transformations you can apply to test dataset

None"},{"location":"reference/project/datamodules/vision/#project.datamodules.vision.VisionDataModule.prepare_data","title":"prepare_data","text":"
prepare_data() -> None\n

Saves files to data_dir.

"},{"location":"reference/project/datamodules/vision/#project.datamodules.vision.VisionDataModule.default_transforms","title":"default_transforms abstractmethod","text":"
default_transforms() -> Callable\n

Default transform for the dataset.

"},{"location":"reference/project/datamodules/vision/#project.datamodules.vision.VisionDataModule.train_dataloader","title":"train_dataloader","text":"
train_dataloader(\n    _dataloader_fn: Callable[\n        Concatenate[Dataset, P], DataLoader\n    ] = DataLoader,\n    *args: args,\n    **kwargs: kwargs\n) -> DataLoader\n

The train dataloader.

"},{"location":"reference/project/datamodules/vision/#project.datamodules.vision.VisionDataModule.val_dataloader","title":"val_dataloader","text":"
val_dataloader(\n    _dataloader_fn: Callable[\n        Concatenate[Dataset, P], DataLoader\n    ] = DataLoader,\n    *args: args,\n    **kwargs: kwargs\n) -> DataLoader\n

The val dataloader.

"},{"location":"reference/project/datamodules/vision/#project.datamodules.vision.VisionDataModule.test_dataloader","title":"test_dataloader","text":"
test_dataloader(\n    _dataloader_fn: Callable[\n        Concatenate[Dataset, P], DataLoader\n    ] = DataLoader,\n    *args: args,\n    **kwargs: kwargs\n) -> DataLoader\n

The test dataloader.

"},{"location":"reference/project/datamodules/image_classification/","title":"Image classification","text":""},{"location":"reference/project/datamodules/image_classification/#project.datamodules.image_classification.ImageClassificationDataModule","title":"ImageClassificationDataModule","text":"

Bases: VisionDataModule[ImageBatchType], ClassificationDataModule[ImageBatchType]

Lightning data modules for image classification.

"},{"location":"reference/project/datamodules/image_classification/#project.datamodules.image_classification.ImageClassificationDataModule.num_classes","title":"num_classes instance-attribute","text":"
num_classes: int\n

Number of classes in the dataset.

"},{"location":"reference/project/datamodules/image_classification/#project.datamodules.image_classification.ImageClassificationDataModule.dims","title":"dims instance-attribute","text":"
dims: tuple[C, H, W]\n

A tuple describing the shape of the data.

"},{"location":"reference/project/datamodules/image_classification/cifar10/","title":"Cifar10","text":""},{"location":"reference/project/datamodules/image_classification/cifar10/#project.datamodules.image_classification.cifar10.CIFAR10DataModule","title":"CIFAR10DataModule","text":"

Bases: ImageClassificationDataModule

.. figure:: https://3qeqpr26caki16dnhd19sv6by6v-wpengine.netdna-ssl.com/wp-content/uploads/2019/01/Plot-of-a-Subset-of-Images-from-the-CIFAR-10-Dataset.png :width: 400 :alt: CIFAR-10

Specs
  • 10 classes (1 per class)
  • Each image is (3 x 32 x 32)

Standard CIFAR10, train, val, test splits and transforms

Transforms::

transforms = transform_lib.Compose([\n    transform_lib.ToImage(),\n    transform_lib.ToDtype(torch.float32, scale=True),\n    transform_lib.Normalize(\n        mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],\n        std=[x / 255.0 for x in [63.0, 62.1, 66.7]]\n    )\n])\n

Example::

from pl_bolts.datamodules import CIFAR10DataModule\n\ndm = CIFAR10DataModule(PATH)\nmodel = LitModel()\n\nTrainer().fit(model, datamodule=dm)\n

Or you can set your own transforms

Example::

dm.train_transforms = ...\ndm.test_transforms = ...\ndm.val_transforms  = ...\n
"},{"location":"reference/project/datamodules/image_classification/fashion_mnist/","title":"Fashion mnist","text":""},{"location":"reference/project/datamodules/image_classification/fashion_mnist/#project.datamodules.image_classification.fashion_mnist.FashionMNISTDataModule","title":"FashionMNISTDataModule","text":"

Bases: MNISTDataModule

.. figure:: https://storage.googleapis.com/kaggle-datasets-images/2243/3791/9384af51de8baa77f6320901f53bd26b/dataset-cover.png :width: 400 :alt: Fashion MNIST

Specs
  • 10 classes (1 per type)
  • Each image is (1 x 28 x 28)

Standard FashionMNIST, train, val, test splits and transforms

Transforms::

mnist_transforms = transform_lib.Compose([\n    transform_lib.ToTensor()\n])\n

Example::

from pl_bolts.datamodules import FashionMNISTDataModule\n\ndm = FashionMNISTDataModule('.')\nmodel = LitModel()\n\nTrainer().fit(model, datamodule=dm)\n
"},{"location":"reference/project/datamodules/image_classification/image_classification/","title":"Image classification","text":""},{"location":"reference/project/datamodules/image_classification/image_classification/#project.datamodules.image_classification.image_classification.ImageClassificationDataModule","title":"ImageClassificationDataModule","text":"

Bases: VisionDataModule[ImageBatchType], ClassificationDataModule[ImageBatchType]

Lightning data modules for image classification.

"},{"location":"reference/project/datamodules/image_classification/image_classification/#project.datamodules.image_classification.image_classification.ImageClassificationDataModule.num_classes","title":"num_classes instance-attribute","text":"
num_classes: int\n

Number of classes in the dataset.

"},{"location":"reference/project/datamodules/image_classification/image_classification/#project.datamodules.image_classification.image_classification.ImageClassificationDataModule.dims","title":"dims instance-attribute","text":"
dims: tuple[C, H, W]\n

A tuple describing the shape of the data.

"},{"location":"reference/project/datamodules/image_classification/imagenet/","title":"Imagenet","text":""},{"location":"reference/project/datamodules/image_classification/imagenet/#project.datamodules.image_classification.imagenet.ImageNetDataModule","title":"ImageNetDataModule","text":"

Bases: ImageClassificationDataModule

ImageNet datamodule.

Extracted from https://github.com/Lightning-Universe/lightning-bolts/blob/master/src/pl_bolts/datamodules/imagenet_datamodule.py - Made this a subclass of VisionDataModule

Notes:

  • train_dataloader uses the train split of imagenet2012 and puts away a portion of it for the validation split.
  • val_dataloader uses the part of the train split of imagenet2012 that was not used for training via num_imgs_per_val_class
  • test_dataloader uses the validation split of imagenet2012 for testing.
    • TODO: need to pass num_imgs_per_class=-1 for test dataset and split=\"test\".
"},{"location":"reference/project/datamodules/image_classification/imagenet/#project.datamodules.image_classification.imagenet.ImageNetDataModule.name","title":"name class-attribute instance-attribute","text":"
name: str | None = 'imagenet'\n

Dataset name.

"},{"location":"reference/project/datamodules/image_classification/imagenet/#project.datamodules.image_classification.imagenet.ImageNetDataModule.dataset_cls","title":"dataset_cls class-attribute","text":"
dataset_cls: type[VisionDataset] = ImageNet\n

Dataset class to use.

"},{"location":"reference/project/datamodules/image_classification/imagenet/#project.datamodules.image_classification.imagenet.ImageNetDataModule.dims","title":"dims class-attribute instance-attribute","text":"
dims: tuple[C, H, W] = (\n    C(3),\n    H(image_size),\n    W(image_size),\n)\n

A tuple describing the shape of the data.

"},{"location":"reference/project/datamodules/image_classification/imagenet/#project.datamodules.image_classification.imagenet.ImageNetDataModule.__init__","title":"__init__","text":"
__init__(\n    data_dir: str | Path = DATA_DIR,\n    *,\n    val_split: int | float = 0.01,\n    num_workers: int = NUM_WORKERS,\n    normalize: bool = False,\n    image_size: int = 224,\n    batch_size: int = 32,\n    seed: int = 42,\n    shuffle: bool = True,\n    pin_memory: bool = True,\n    drop_last: bool = False,\n    train_transforms: Callable | None = None,\n    val_transforms: Callable | None = None,\n    test_transforms: Callable | None = None,\n    **kwargs\n)\n

Creates an ImageNet datamodule (doesn't load or prepare the dataset yet).

Parameters:

Name Type Description Default data_dir str | Path

path to the imagenet dataset file

DATA_DIR val_split int | float

save val_split% of the training data of each class for validation.

0.01 image_size int

final image size

224 num_workers int

how many data workers

NUM_WORKERS batch_size int

batch_size

32 shuffle bool

If true shuffles the data every epoch

True pin_memory bool

If true, the data loader will copy Tensors into CUDA pinned memory before returning them

True drop_last bool

If true drops the last incomplete batch

False"},{"location":"reference/project/datamodules/image_classification/imagenet/#project.datamodules.image_classification.imagenet.ImageNetDataModule.train_transform","title":"train_transform","text":"
train_transform() -> Module\n

The standard imagenet transforms.

transforms.Compose([\n    transforms.RandomResizedCrop(self.image_size),\n    transforms.RandomHorizontalFlip(),\n    transforms.ToTensor(),\n    transforms.Normalize(\n        mean=[0.485, 0.456, 0.406],\n        std=[0.229, 0.224, 0.225]\n    ),\n])\n
"},{"location":"reference/project/datamodules/image_classification/imagenet/#project.datamodules.image_classification.imagenet.ImageNetDataModule.val_transform","title":"val_transform","text":"
val_transform() -> Compose\n

The standard imagenet transforms for validation.


transforms.Compose([\n    transforms.Resize(self.image_size + 32),\n    transforms.CenterCrop(self.image_size),\n    transforms.ToTensor(),\n    transforms.Normalize(\n        mean=[0.485, 0.456, 0.406],\n        std=[0.229, 0.224, 0.225]\n    ),\n])\n
"},{"location":"reference/project/datamodules/image_classification/imagenet/#project.datamodules.image_classification.imagenet.prepare_imagenet","title":"prepare_imagenet","text":"
prepare_imagenet(\n    root: Path,\n    *,\n    split: Literal[\"train\", \"val\"] = \"train\",\n    network_imagenet_dir: Path\n) -> None\n

Custom preparation function for ImageNet, using @obilaniu's tar magic in Python form.

The core of this is equivalent to these bash commands:

mkdir -p $SLURM_TMPDIR/imagenet/val\ncd       $SLURM_TMPDIR/imagenet/val\ntar  -xf /network/scratch/b/bilaniuo/ILSVRC2012_img_val.tar\nmkdir -p $SLURM_TMPDIR/imagenet/train\ncd       $SLURM_TMPDIR/imagenet/train\ntar  -xf /network/datasets/imagenet/ILSVRC2012_img_train.tar          --to-command='mkdir ${TAR_REALNAME%.tar}; tar -xC ${TAR_REALNAME%.tar}'\n
"},{"location":"reference/project/datamodules/image_classification/inaturalist/","title":"Inaturalist","text":""},{"location":"reference/project/datamodules/image_classification/inaturalist/#project.datamodules.image_classification.inaturalist.INaturalistDataModule","title":"INaturalistDataModule","text":"

Bases: VisionDataModule

"},{"location":"reference/project/datamodules/image_classification/inaturalist/#project.datamodules.image_classification.inaturalist.INaturalistDataModule.name","title":"name class-attribute instance-attribute","text":"
name: str | None = 'inaturalist'\n

Dataset name.

"},{"location":"reference/project/datamodules/image_classification/inaturalist/#project.datamodules.image_classification.inaturalist.INaturalistDataModule.dataset_cls","title":"dataset_cls class-attribute","text":"
dataset_cls: type[VisionDataset] = INaturalist\n

Dataset class to use.

"},{"location":"reference/project/datamodules/image_classification/inaturalist/#project.datamodules.image_classification.inaturalist.INaturalistDataModule.dims","title":"dims class-attribute instance-attribute","text":"
dims: tuple[C, H, W] = (C(3), H(224), W(224))\n

A tuple describing the shape of the data.

"},{"location":"reference/project/datamodules/image_classification/inaturalist/#project.datamodules.image_classification.inaturalist.INaturalistDataModule.default_transforms","title":"default_transforms","text":"
default_transforms() -> Callable\n

Default transform for the dataset.

"},{"location":"reference/project/datamodules/image_classification/inaturalist_test/","title":"Inaturalist test","text":""},{"location":"reference/project/datamodules/image_classification/mnist/","title":"Mnist","text":""},{"location":"reference/project/datamodules/image_classification/mnist/#project.datamodules.image_classification.mnist.MNISTDataModule","title":"MNISTDataModule","text":"

Bases: ImageClassificationDataModule

.. figure:: https://miro.medium.com/max/744/1*AO2rIhzRYzFVQlFLx9DM9A.png :width: 400 :alt: MNIST

Specs
  • 10 classes (1 per digit)
  • Each image is (1 x 28 x 28)

Standard MNIST, train, val, test splits and transforms

Transforms::

mnist_transforms = transform_lib.Compose([\n    transform_lib.ToTensor()\n])\n

Example::

from pl_bolts.datamodules import MNISTDataModule\n\ndm = MNISTDataModule('.')\nmodel = LitModel()\n\nTrainer().fit(model, datamodule=dm)\n
"},{"location":"reference/project/datamodules/image_classification/mnist/#project.datamodules.image_classification.mnist.MNISTDataModule.__init__","title":"__init__","text":"
__init__(\n    data_dir: str | Path = DATA_DIR,\n    val_split: int | float = 0.2,\n    num_workers: int = 0,\n    normalize: bool = False,\n    batch_size: int = 32,\n    seed: int = 42,\n    shuffle: bool = True,\n    pin_memory: bool = True,\n    drop_last: bool = False,\n    *args: Any,\n    **kwargs: Any\n) -> None\n

Parameters:

Name Type Description Default data_dir str | Path

Where to save/load the data

DATA_DIR val_split int | float

Percent (float) or number (int) of samples to use for the validation split

0.2 num_workers int

How many workers to use for loading data

0 normalize bool

If true applies image normalize

False batch_size int

How many samples per batch to load

32 seed int

Random seed to be used for train/val/test splits

42 shuffle bool

If true shuffles the train data every epoch

True pin_memory bool

If true, the data loader will copy Tensors into CUDA pinned memory before returning them

True drop_last bool

If true drops the last incomplete batch

False"},{"location":"reference/project/datamodules/text/","title":"Text","text":""},{"location":"reference/project/datamodules/text/#project.datamodules.text.TextClassificationDataModule","title":"TextClassificationDataModule","text":"

Bases: LightningDataModule

Lightning data module for HF text classification datasets.

This is based on this tutorial: https://lightning.ai/docs/pytorch/stable/notebooks/lightning_examples/text-transformers.html

"},{"location":"reference/project/datamodules/text/text_classification/","title":"Text classification","text":"

Example algorithm that can train a huggingface model.

Also check out this link for more detailed example script:

https://github.com/lebrice/mila-docs/blob/llm_training/docs/examples/distributed/LLM_training/main.py

"},{"location":"reference/project/datamodules/text/text_classification/#project.datamodules.text.text_classification.TextClassificationDataModule","title":"TextClassificationDataModule","text":"

Bases: LightningDataModule

Lightning data module for HF text classification datasets.

This is based on this tutorial: https://lightning.ai/docs/pytorch/stable/notebooks/lightning_examples/text-transformers.html

"},{"location":"reference/project/datamodules/text/text_classification_test/","title":"Text classification test","text":""},{"location":"reference/project/datamodules/text/text_classification_test/#project.datamodules.text.text_classification_test.datamodule","title":"datamodule","text":"
datamodule(request: FixtureRequest) -> LightningDataModule\n

Fixture that creates the datamodule for the given config.

"},{"location":"reference/project/datamodules/text/text_classification_test/#project.datamodules.text.text_classification_test.test_dataset_location","title":"test_dataset_location","text":"
test_dataset_location(\n    prepared_datamodule: TextClassificationDataModule,\n)\n

Test that the dataset is downloaded to the correct location.

"},{"location":"reference/project/networks/","title":"Networks","text":"

Network definitions.

"},{"location":"reference/project/networks/#project.networks.FcNet","title":"FcNet","text":"

Bases: Sequential

"},{"location":"reference/project/networks/#project.networks.FcNet.HParams","title":"HParams","text":"

Dataclass containing the network hyper-parameters.

This is an example of how Pydantic can be used to validate configs and command-line arguments.

"},{"location":"reference/project/networks/#project.networks.FcNet.HParams.dropout_rate","title":"dropout_rate class-attribute instance-attribute","text":"
dropout_rate: float = 0.5\n

Dropout rate.

Set to 0 to disable dropout.

"},{"location":"reference/project/networks/fcnet/","title":"Fcnet","text":"

An example of a simple fully connected network.

"},{"location":"reference/project/networks/fcnet/#project.networks.fcnet.FcNet","title":"FcNet","text":"

Bases: Sequential

"},{"location":"reference/project/networks/fcnet/#project.networks.fcnet.FcNet.HParams","title":"HParams","text":"

Dataclass containing the network hyper-parameters.

This is an example of how Pydantic can be used to validate configs and command-line arguments.

"},{"location":"reference/project/networks/fcnet/#project.networks.fcnet.FcNet.HParams.dropout_rate","title":"dropout_rate class-attribute instance-attribute","text":"
dropout_rate: float = 0.5\n

Dropout rate.

Set to 0 to disable dropout.

"},{"location":"reference/project/trainers/","title":"Trainers","text":"

Trainers: actually run the training loop.

You can define custom trainers here.

"},{"location":"reference/project/trainers/#project.trainers.JaxTrainer","title":"JaxTrainer","text":"

Bases: PyTreeNode

A simplified version of the lightning.Trainer with a fully jitted training loop.

"},{"location":"reference/project/trainers/#project.trainers.JaxTrainer--assumptions","title":"Assumptions:","text":"
  • The algo object must match the JaxModule protocol (in other words, it should implement its methods).
"},{"location":"reference/project/trainers/#project.trainers.JaxTrainer--training-loop","title":"Training loop","text":"

This is the training loop, which is fully jitted:

ts = algo.init_train_state(rng)\n\nsetup(\"fit\")\non_fit_start()\non_train_start()\n\neval_metrics = []\nfor epoch in range(self.max_epochs):\n    on_train_epoch_start()\n\n    for step in range(self.training_steps_per_epoch):\n\n        batch = algo.get_batch(ts, step)\n\n        on_train_batch_start()\n\n        ts, metrics = algo.training_step(step, ts, batch)\n\n        on_train_batch_end()\n\n    on_train_epoch_end()\n\n    # Evaluation \"loop\"\n    on_validation_epoch_start()\n    epoch_eval_metrics = self.eval_epoch(ts, epoch, algo)\n    on_validation_epoch_end()\n\n    eval_metrics.append(epoch_eval_metrics)\n\nreturn ts, eval_metrics\n
"},{"location":"reference/project/trainers/#project.trainers.JaxTrainer--caveats","title":"Caveats","text":"
  • Some lightning callbacks can be used with this trainer and work well, but not all of them.
  • You can either use regular pytorch-lightning callbacks, or use jax.vmap on the fit method, but not both.
  • If you want to use jax.vmap on the fit method, just remove the callbacks on the Trainer for now.
"},{"location":"reference/project/trainers/#project.trainers.JaxTrainer--todos-ideas","title":"TODOs / ideas","text":"
  • Add a checkpoint callback with orbax-checkpoint?
"},{"location":"reference/project/trainers/#project.trainers.JaxTrainer.fit","title":"fit","text":"
fit(\n    algo: JaxModule[Ts, _B, _MetricsT],\n    rng: PRNGKey,\n    train_state: Ts | None = None,\n    skip_initial_evaluation: bool = False,\n) -> tuple[Ts, _MetricsT]\n

Full training loop in pure jax (a lot faster than when using pytorch-lightning).

Unfolded version of rejax.PPO.train.

"},{"location":"reference/project/trainers/#project.trainers.JaxTrainer.training_step","title":"training_step","text":"
training_step(\n    batch_idx: int,\n    ts: Ts,\n    algo: JaxModule[Ts, _B, _MetricsT],\n)\n

Training step in pure jax (joined data collection + training).

MUCH faster than using pytorch-lightning, but you lose the callbacks and such.

"},{"location":"reference/project/trainers/jax_trainer/","title":"Jax trainer","text":""},{"location":"reference/project/trainers/jax_trainer/#project.trainers.jax_trainer.Ts","title":"Ts module-attribute","text":"
Ts = TypeVar('Ts', bound=PyTreeNode, default=PyTreeNode)\n

Type Variable for the training state.

"},{"location":"reference/project/trainers/jax_trainer/#project.trainers.jax_trainer.JaxModule","title":"JaxModule","text":"

Bases: Protocol[Ts, _B, _MetricsT]

A protocol for algorithms that can be trained by the JaxTrainer.

The JaxRLExample is an example that follows this structure and can be trained with a JaxTrainer.
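
Below is a minimal sketch of an object that structurally matches this protocol (all names are illustrative, and flax.struct is assumed here for the PyTreeNode containers):

import jax\nimport jax.numpy as jnp\nfrom flax import struct\n\n\nclass TrainState(struct.PyTreeNode):\n    step: jax.Array\n\n\nclass Metrics(struct.PyTreeNode):\n    loss: jax.Array\n\n\nclass DummyAlgo:\n    \"\"\"Hypothetical algorithm matching the JaxModule protocol.\"\"\"\n\n    def init_train_state(self, rng: jax.Array) -> TrainState:\n        return TrainState(step=jnp.zeros((), dtype=jnp.int32))\n\n    def get_batch(self, ts: TrainState, batch_idx: int) -> tuple[TrainState, jax.Array]:\n        # A real algorithm would fetch or generate a batch of data here.\n        return ts, jnp.ones((32, 10))\n\n    def training_step(self, batch_idx: int, ts: TrainState, batch: jax.Array) -> tuple[TrainState, Metrics]:\n        # Update the training state; here we only increment a step counter.\n        return ts.replace(step=ts.step + 1), Metrics(loss=batch.mean())\n\n    def eval_callback(self, ts: TrainState) -> Metrics:\n        return Metrics(loss=jnp.zeros(()))\n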

"},{"location":"reference/project/trainers/jax_trainer/#project.trainers.jax_trainer.JaxModule.init_train_state","title":"init_train_state","text":"
init_train_state(rng: PRNGKey) -> Ts\n

Create the initial training state.

"},{"location":"reference/project/trainers/jax_trainer/#project.trainers.jax_trainer.JaxModule.get_batch","title":"get_batch","text":"
get_batch(ts: Ts, batch_idx: int) -> tuple[Ts, _B]\n

Produces a batch of data.

"},{"location":"reference/project/trainers/jax_trainer/#project.trainers.jax_trainer.JaxModule.training_step","title":"training_step","text":"
training_step(\n    batch_idx: int, ts: Ts, batch: _B\n) -> tuple[Ts, PyTreeNode]\n

Update the training state using a \"batch\" of data.

"},{"location":"reference/project/trainers/jax_trainer/#project.trainers.jax_trainer.JaxModule.eval_callback","title":"eval_callback","text":"
eval_callback(ts: Ts) -> _MetricsT\n

Perform evaluation and return metrics.

"},{"location":"reference/project/trainers/jax_trainer/#project.trainers.jax_trainer.JaxTrainer","title":"JaxTrainer","text":"

Bases: PyTreeNode

A simplified version of the lightning.Trainer with a fully jitted training loop.

"},{"location":"reference/project/trainers/jax_trainer/#project.trainers.jax_trainer.JaxTrainer--assumptions","title":"Assumptions:","text":"
  • The algo object must match the JaxModule protocol (in other words, it should implement its methods).
"},{"location":"reference/project/trainers/jax_trainer/#project.trainers.jax_trainer.JaxTrainer--training-loop","title":"Training loop","text":"

This is the training loop, which is fully jitted:

ts = algo.init_train_state(rng)\n\nsetup(\"fit\")\non_fit_start()\non_train_start()\n\neval_metrics = []\nfor epoch in range(self.max_epochs):\n    on_train_epoch_start()\n\n    for step in range(self.training_steps_per_epoch):\n\n        batch = algo.get_batch(ts, step)\n\n        on_train_batch_start()\n\n        ts, metrics = algo.training_step(step, ts, batch)\n\n        on_train_batch_end()\n\n    on_train_epoch_end()\n\n    # Evaluation \"loop\"\n    on_validation_epoch_start()\n    epoch_eval_metrics = self.eval_epoch(ts, epoch, algo)\n    on_validation_epoch_end()\n\n    eval_metrics.append(epoch_eval_metrics)\n\nreturn ts, eval_metrics\n
"},{"location":"reference/project/trainers/jax_trainer/#project.trainers.jax_trainer.JaxTrainer--caveats","title":"Caveats","text":"
  • Some lightning callbacks can be used with this trainer and work well, but not all of them.
  • You can either use regular pytorch-lightning callbacks, or use jax.vmap on the fit method, but not both.
  • If you want to use jax.vmap on the fit method, just remove the callbacks on the Trainer for now.
"},{"location":"reference/project/trainers/jax_trainer/#project.trainers.jax_trainer.JaxTrainer--todos-ideas","title":"TODOs / ideas","text":"
  • Add a checkpoint callback with orbax-checkpoint?
"},{"location":"reference/project/trainers/jax_trainer/#project.trainers.jax_trainer.JaxTrainer.fit","title":"fit","text":"
fit(\n    algo: JaxModule[Ts, _B, _MetricsT],\n    rng: PRNGKey,\n    train_state: Ts | None = None,\n    skip_initial_evaluation: bool = False,\n) -> tuple[Ts, _MetricsT]\n

Full training loop in pure jax (a lot faster than when using pytorch-lightning).

Unfolded version of rejax.PPO.train.

"},{"location":"reference/project/trainers/jax_trainer/#project.trainers.jax_trainer.JaxTrainer.training_step","title":"training_step","text":"
training_step(\n    batch_idx: int,\n    ts: Ts,\n    algo: JaxModule[Ts, _B, _MetricsT],\n)\n

Training step in pure jax (joined data collection + training).

MUCH faster than using pytorch-lightning, but you lose the callbacks and such.

"},{"location":"reference/project/trainers/jax_trainer/#project.trainers.jax_trainer.hparams_to_dict","title":"hparams_to_dict","text":"
hparams_to_dict(algo: PyTreeNode) -> dict\n

Convert the learner struct to a serializable dict.

"},{"location":"reference/project/utils/","title":"Utils","text":""},{"location":"reference/project/utils/env_vars/","title":"Env vars","text":""},{"location":"reference/project/utils/env_vars/#project.utils.env_vars.SLURM_JOB_ID","title":"SLURM_JOB_ID module-attribute","text":"
SLURM_JOB_ID: int | None = (\n    int(environ[\"SLURM_JOB_ID\"])\n    if \"SLURM_JOB_ID\" in environ\n    else None\n)\n

The value of the 'SLURM_JOB_ID' environment variable.

See https://slurm.schedmd.com/sbatch.html#OPT_SLURM_JOB_ID.

"},{"location":"reference/project/utils/env_vars/#project.utils.env_vars.SLURM_TMPDIR","title":"SLURM_TMPDIR module-attribute","text":"
SLURM_TMPDIR: Path | None = (\n    Path(environ[\"SLURM_TMPDIR\"])\n    if \"SLURM_TMPDIR\" in environ\n    else (\n        tmp\n        if SLURM_JOB_ID is not None and exists()\n        else None\n    )\n)\n

The SLURM temporary directory, the fastest storage available.

  • Extract your dataset to this directory at the start of your job.
  • Remember to move any files created here to $SCRATCH since everything gets deleted at the end of the job.

See https://docs.mila.quebec/Information.html#slurm-tmpdir for more information.
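
For example, a job could extract an archive from $SCRATCH to this fast local storage before training (a sketch; the archive name is made up):

import shutil\n\nfrom project.utils.env_vars import SCRATCH, SLURM_TMPDIR\n\nif SLURM_TMPDIR is not None and SCRATCH is not None:\n    # Extract once per job; reading from SLURM_TMPDIR is much faster than from network storage.\n    shutil.unpack_archive(SCRATCH / \"my_dataset.tar\", SLURM_TMPDIR / \"my_dataset\")\n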

"},{"location":"reference/project/utils/env_vars/#project.utils.env_vars.SCRATCH","title":"SCRATCH module-attribute","text":"
SCRATCH = (\n    Path(environ[\"SCRATCH\"])\n    if \"SCRATCH\" in environ\n    else None\n)\n

Network directory where temporary logs / checkpoints / custom datasets should be saved.

Note that this is temporary storage. Files that you wish to be saved long-term should be saved to the ARCHIVE directory.

See https://docs.mila.quebec/Information.html#scratch for more information.

"},{"location":"reference/project/utils/env_vars/#project.utils.env_vars.ARCHIVE","title":"ARCHIVE module-attribute","text":"
ARCHIVE = (\n    Path(environ[\"ARCHIVE\"])\n    if \"ARCHIVE\" in environ\n    else None\n)\n

Network directory for long-term storage. Only accessible from the login or cpu-only compute nodes.

See https://docs.mila.quebec/Information.html#archive for more information.

"},{"location":"reference/project/utils/env_vars/#project.utils.env_vars.NETWORK_DIR","title":"NETWORK_DIR module-attribute","text":"
NETWORK_DIR = (\n    Path(environ[\"NETWORK_DIR\"])\n    if \"NETWORK_DIR\" in environ\n    else _network_dir if exists() else None\n)\n

The (read-only) network directory that contains datasets/weights/etc.

todo: adapt this for the DRAC clusters.

When running outside of the mila/DRAC clusters, this will be None, but can be mocked by setting the NETWORK_DIR environment variable.

"},{"location":"reference/project/utils/env_vars/#project.utils.env_vars.REPO_ROOTDIR","title":"REPO_ROOTDIR module-attribute","text":"
REPO_ROOTDIR = parent\n

The root directory of this repository on this machine.

"},{"location":"reference/project/utils/env_vars/#project.utils.env_vars.DATA_DIR","title":"DATA_DIR module-attribute","text":"
DATA_DIR = Path(\n    get(\n        \"DATA_DIR\",\n        SLURM_TMPDIR or SCRATCH or REPO_ROOTDIR / \"data\",\n    )\n)\n

Local Directory where datasets should be extracted on this machine.

"},{"location":"reference/project/utils/env_vars/#project.utils.env_vars.torchvision_dir","title":"torchvision_dir module-attribute","text":"
torchvision_dir: Path | None = None\n

Network directory with torchvision datasets.

"},{"location":"reference/project/utils/env_vars/#project.utils.env_vars.NUM_WORKERS","title":"NUM_WORKERS module-attribute","text":"
NUM_WORKERS = int(\n    get(\n        \"SLURM_CPUS_PER_TASK\",\n        get(\n            \"SLURM_CPUS_ON_NODE\",\n            (\n                len(sched_getaffinity(0))\n                if hasattr(os, \"sched_getaffinity\")\n                else cpu_count()\n            ),\n        ),\n    )\n)\n

Default number of workers to be used by dataloaders, based on the number of CPUs and/or tasks.

"},{"location":"reference/project/utils/env_vars/#project.utils.env_vars.get_constant","title":"get_constant","text":"
get_constant(*names: str)\n

Resolver for Hydra to get the value of a constant in this file.

"},{"location":"reference/project/utils/hydra_utils/","title":"Hydra utils","text":"

Utility functions related to working with Hydra.

"},{"location":"reference/project/utils/hydra_utils/#project.utils.hydra_utils.get_attr","title":"get_attr","text":"
get_attr(obj: Any, *attributes: str)\n

Recursive version of getattr when the attribute is like 'a.b.c'.

"},{"location":"reference/project/utils/hydra_utils/#project.utils.hydra_utils.register_instance_attr_resolver","title":"register_instance_attr_resolver","text":"
register_instance_attr_resolver(\n    instantiated_objects_cache: dict[str, Any]\n) -> None\n

Registers the instance_attr custom resolver with OmegaConf.

"},{"location":"reference/project/utils/hydra_utils/#project.utils.hydra_utils.resolve_dictconfig","title":"resolve_dictconfig","text":"
resolve_dictconfig(dict_config: DictConfig) -> Config\n

Resolve all interpolations in the DictConfig.

Returns a Config object, which is a simple dataclass used to give nicer type hints for the contents of an experiment config.

"},{"location":"reference/project/utils/hydra_utils/#project.utils.hydra_utils.instance_attr","title":"instance_attr","text":"
instance_attr(\n    *attributes: str,\n    _instantiated_objects_cache: (\n        MutableMapping[str, Any] | None\n    ) = None\n)\n

Allows interpolations of the instantiated objects attributes (rather than configs).

This is very hacky

This is quite hacky and very dependent on the code of Hydra / OmegaConf not changing too much in the future. For this reason, consider pinning the versions of these libraries in your project if you intend to use this feature.

This works during a call to hydra.utils.instantiate, by looking at the stack trace to find the instantiated objects, which are in a variable in that function.

If there is a ${instance_attr:datamodule.num_classes} interpolation in a config, this will:

  1. instantiate the datamodule config
  2. store it at the key 'datamodule' in the instantiated objects cache dict (if passed).

    (This is useful since it makes it possible for us to later reuse this instantiated datamodule instead of re-instantiating it.)

  3. Retrieve the value of the attribute (getattr(datamodule, 'num_classes')) and return it.
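
As an illustration, such an interpolation could appear in a config like this (a sketch; the surrounding keys are made up, and the interpolation is only resolved during hydra.utils.instantiate):

from omegaconf import OmegaConf\n\ncfg = OmegaConf.create(\n    {\n        \"datamodule\": {\"_target_\": \"project.datamodules.MNISTDataModule\"},\n        \"algorithm\": {\n            \"network\": {\"output_dims\": \"${instance_attr:datamodule.num_classes}\"}\n        },\n    }\n)\n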

"},{"location":"reference/project/utils/hydra_utils/#project.utils.hydra_utils.make_config_and_store","title":"make_config_and_store","text":"
make_config_and_store(\n    target: Callable[..., Target],\n    *,\n    store: ZenStore,\n    **overrides\n)\n

Creates a config dataclass for the given target and stores it in the config store.

This uses hydra_zen.builds to create the config dataclass and stores it at the name config_name, or target.__name__.
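
A sketch of how this could be used (the store and group name here are assumptions for illustration):

import hydra_zen\n\nfrom project.networks.fcnet import FcNet\nfrom project.utils.hydra_utils import make_config_and_store\n\nnetworks_store = hydra_zen.store(group=\"algorithm/network\")\nFcNetConfig = make_config_and_store(FcNet, store=networks_store)\n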

"},{"location":"reference/project/utils/remote_launcher_plugin/","title":"Remote launcher plugin","text":""},{"location":"reference/project/utils/remote_launcher_plugin/#project.utils.remote_launcher_plugin.PatchedSlurmQueueConf","title":"PatchedSlurmQueueConf dataclass","text":"

Bases: _AddedArgumentsConf, SlurmQueueConf

Adds more SLURM parameters to the config for the SLURM submitit launcher of Hydra.

"},{"location":"reference/project/utils/remote_launcher_plugin/#project.utils.remote_launcher_plugin.PatchedSlurmQueueConf.signal_delay_s","title":"signal_delay_s class-attribute instance-attribute","text":"
signal_delay_s: int = 120\n

USR1 signal delay before timeout.

"},{"location":"reference/project/utils/remote_launcher_plugin/#project.utils.remote_launcher_plugin.PatchedSlurmQueueConf.max_num_timeout","title":"max_num_timeout class-attribute instance-attribute","text":"
max_num_timeout: int = 0\n

Maximum number of retries on job timeout.

Change this only after you have confirmed that your code can handle re-submission by properly resuming from the latest stored checkpoint. Check the following for more info on slurm_max_num_timeout: https://github.com/facebookincubator/submitit/blob/master/docs/checkpointing.md

"},{"location":"reference/project/utils/remote_launcher_plugin/#project.utils.remote_launcher_plugin.PatchedSlurmQueueConf.additional_parameters","title":"additional_parameters class-attribute instance-attribute","text":"
additional_parameters: dict[str, Any] = field(\n    default_factory=dict\n)\n

Useful to add parameters which are not currently available in the plugin.

Eg: {\"mail-user\": \"blublu@fb.com\", \"mail-type\": \"BEGIN\"}

"},{"location":"reference/project/utils/remote_launcher_plugin/#project.utils.remote_launcher_plugin.PatchedSlurmQueueConf.array_parallelism","title":"array_parallelism class-attribute instance-attribute","text":"
array_parallelism: int = 256\n

Maximum number of jobs running in parallel.

"},{"location":"reference/project/utils/remote_launcher_plugin/#project.utils.remote_launcher_plugin.PatchedSlurmQueueConf.setup","title":"setup class-attribute instance-attribute","text":"
setup: list[str] | None = None\n

A list of commands to run in sbatch before running srun.

"},{"location":"reference/project/utils/remote_launcher_plugin/#project.utils.remote_launcher_plugin.get_slurm_accounts","title":"get_slurm_accounts","text":"
get_slurm_accounts(cluster: str) -> list[str]\n

Gets the SLURM accounts of the user using sacctmgr on the slurm cluster.

"},{"location":"reference/project/utils/remote_launcher_plugin_test/","title":"Remote launcher plugin test","text":""},{"location":"reference/project/utils/remote_launcher_plugin_test/#project.utils.remote_launcher_plugin_test.test_can_load_configs","title":"test_can_load_configs","text":"
test_can_load_configs(command_line_args: str)\n

Test that the cluster and resource configs can be loaded without errors.

"},{"location":"reference/project/utils/testutils/","title":"Testutils","text":"

Utility functions useful for testing.

"},{"location":"reference/project/utils/testutils/#project.utils.testutils.default_marks_for_config_name","title":"default_marks_for_config_name module-attribute","text":"
default_marks_for_config_name: dict[\n    str, list[MarkDecorator]\n] = {\n    \"inaturalist\": [\n        slow,\n        skipif(\n            not NETWORK_DIR and exists(),\n            reason=\"Expects to be run on the Mila cluster for now\",\n        ),\n    ],\n    \"imagenet\": [\n        slow,\n        skipif(\n            not NETWORK_DIR and exists(),\n            reason=\"Expects to be run on a cluster with the ImageNet dataset.\",\n        ),\n    ],\n    \"vision\": [\n        skip(\n            reason=\"Base class, shouldn't be instantiated.\"\n        )\n    ],\n}\n

Dict with some default marks for some config names.

"},{"location":"reference/project/utils/testutils/#project.utils.testutils.default_marks_for_config_combinations","title":"default_marks_for_config_combinations module-attribute","text":"
default_marks_for_config_combinations: dict[\n    tuple[str, ...], list[MarkDecorator]\n] = {\n    (\"imagenet\", \"fcnet\"): [\n        xfail(\n            reason=\"FcNet shouldn't be applied to the ImageNet datamodule. It can lead to nans in the parameters.\"\n        )\n    ],\n    (\"imagenet\", \"jax_fcnet\"): [\n        xfail(\n            reason=\"FcNet shouldn't be applied to the ImageNet datamodule. It can lead to nans in the parameters.\"\n        )\n    ],\n    (\"imagenet\", \"jax_cnn\"): [\n        xfail(\n            reason=\"todo: parameters contain nans when overfitting on one batch? Maybe we're using too many iterations?\"\n        )\n    ],\n    None: {\n        (resnet_config, mnist_dataset_config): [\n            skip(\n                reason=\"ResNets don't work with MNIST datasets because the image resolution is too small.\"\n            )\n        ]\n        for (\n            resnet_config,\n            mnist_dataset_config,\n        ) in product(\n            get_all_configs_in_group_of_type(\n                \"algorithm/network\", ResNet\n            ),\n            get_all_configs_in_group_of_type(\n                \"datamodule\",\n                (MNISTDataModule, FashionMNISTDataModule),\n            ),\n        )\n    },\n}\n

Dict with some default marks to add to tests when some config combinations are present.

For example, ResNet networks can't be applied to the MNIST datasets.

"},{"location":"reference/project/utils/testutils/#project.utils.testutils.get_target_of_config","title":"get_target_of_config","text":"
get_target_of_config(\n    config_group: str,\n    config_name: str,\n    _cs: ConfigStore | None = None,\n) -> Callable\n

Returns the class that is to be instantiated by the given config name.

In the case of inner dataclasses (e.g. Model.HParams), this returns the outer class (Model).

"},{"location":"reference/project/utils/testutils/#project.utils.testutils.get_all_configs_in_group_of_type","title":"get_all_configs_in_group_of_type","text":"
get_all_configs_in_group_of_type(\n    config_group: str,\n    config_target_type: type | tuple[type, ...],\n    include_subclasses: bool = True,\n    excluding: type | tuple[type, ...] = (),\n) -> list[str]\n

Returns the names of all the configs in the given config group that have this target or a subclass of it.

"},{"location":"reference/project/utils/testutils/#project.utils.testutils.run_for_all_configs_of_type","title":"run_for_all_configs_of_type","text":"
run_for_all_configs_of_type(\n    config_group: str,\n    target_type: type,\n    excluding: type | tuple[type, ...] = (),\n)\n

Parametrizes a test to run with all the configs in the given group that have targets which are subclasses of the given type.

For example:

@run_for_all_configs_of_type(\"algorithm\", torch.nn.Module)\ndef test_something_about_the_algorithm(algorithm: torch.nn.Module):\n    ''' This test will run with all the configs in the 'algorithm' group that create nn.Modules! '''\n

Concretely, this works by indirectly parametrizing the f\"{config_group}_config\" fixture. To learn more about indirect parametrization in PyTest, take a look at https://docs.pytest.org/en/stable/example/parametrize.html#indirect-parametrization

"},{"location":"reference/project/utils/testutils/#project.utils.testutils.parametrize_when_used","title":"parametrize_when_used","text":"
parametrize_when_used(\n    arg_name_or_fixture: str | Callable,\n    values: list,\n    indirect: bool | None = None,\n) -> MarkDecorator\n

Applies pytest.mark.parametrize to a test, but only when the parametrized argument is actually used by that test (directly or indirectly).

When pytest.mark.parametrize is applied to a class, all test methods in that class need to use the parametrized argument, otherwise an error is raised. This function exists to work around this and allows writing test methods that don't use the parametrized argument.

For example, this works, but would not be possible with pytest.mark.parametrize:

import pytest\n\n@parametrize_when_used(\"value\", [1, 2, 3])\nclass TestFoo:\n    def test_foo(self, value):\n        ...\n\n    def test_bar(self, value):\n        ...\n\n    def test_something_else(self):  # Would error with plain pytest.mark.parametrize, but works here.\n        pass\n

Parameters:

Name Type Description Default arg_name_or_fixture str | Callable

The name of the argument to parametrize, or a fixture to parametrize indirectly.

required values list

The values to be used to parametrize the test.

required

Returns:

Type Description MarkDecorator

A pytest.MarkDecorator that parametrizes the test with the given values only when the argument is used (directly or indirectly) by the test.

"},{"location":"reference/project/utils/testutils/#project.utils.testutils.run_for_all_configs_in_group","title":"run_for_all_configs_in_group","text":"
run_for_all_configs_in_group(\n    group_name: str,\n    config_name_to_marks: (\n        Mapping[str, MarkDecorator | list[MarkDecorator]]\n        | None\n    ) = None,\n)\n

Apply this marker to a test to make it run with all configs in a given group.

This assumes that a \"group_name_config\" fixture is defined, for example, algorithm_config, datamodule_config, network_config. This then does an indirect parametrization of that fixture, so that it receives the config name as a parameter and returns it.

The wrapped test will run with every config from that group, as long as the config fixture is used either as an input argument to the test function or as an input argument to another fixture.
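
For example, a hedged usage sketch (the \"network\" group and the \"resnet18\" config name are placeholders for whatever configs exist in your project):

import pytest\n\nfrom project.utils.testutils import run_for_all_configs_in_group\n\n@run_for_all_configs_in_group(\n    \"network\",\n    config_name_to_marks={\"resnet18\": pytest.mark.skip(reason=\"placeholder mark\")},\n)\ndef test_network_config_name(network_config: str):\n    # The indirectly-parametrized `network_config` fixture receives each config name in turn.\n    assert isinstance(network_config, str)\n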

Parameters:

Name Type Description Default group_name str

The name of the config group whose configs the test should run with, for example \"algorithm\", \"datamodule\" or \"network\".

required config_name_to_marks Mapping[str, MarkDecorator | list[MarkDecorator]] | None

Dictionary from config names to pytest marks (e.g. pytest.mark.xfail, pytest.mark.skip) to use for that particular config.

None"},{"location":"reference/project/utils/testutils/#project.utils.testutils.total_vram_gb","title":"total_vram_gb","text":"
total_vram_gb() -> float\n

Returns the total VRAM in GB.

"},{"location":"reference/project/utils/utils/","title":"Utils","text":""},{"location":"reference/project/utils/utils/#project.utils.utils.print_config","title":"print_config","text":"
print_config(\n    config: DictConfig,\n    print_order: Sequence[str] = (\n        \"algorithm\",\n        \"datamodule\",\n        \"trainer\",\n    ),\n    resolve: bool = True,\n) -> None\n

Prints the contents of a DictConfig as a tree, using the Rich library.

TAKEN FROM https://github.com/ashleve/lightning-hydra-template/blob/6a92395ed6afd573fa44dd3a054a603acbdcac06/src/utils/__init__.py#L56
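
For example, a minimal sketch (the config contents below are made up; in practice the config is composed by Hydra):

from omegaconf import OmegaConf\n\nfrom project.utils.utils import print_config\n\nconfig = OmegaConf.create(\n    {\"algorithm\": {\"lr\": 1e-3}, \"datamodule\": {\"batch_size\": 32}, \"trainer\": {\"max_epochs\": 1}}\n)\nprint_config(config, resolve=False)\n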

Parameters:

Name Type Description Default config DictConfig

Configuration composed by Hydra.

required print_order Sequence[str]

Determines in what order config components are printed.

('algorithm', 'datamodule', 'trainer') resolve bool

Whether to resolve reference fields of DictConfig.

True"},{"location":"reference/project/utils/typing_utils/","title":"Typing utils","text":"

Utilities to help annotate the types of values in the project.

"},{"location":"reference/project/utils/typing_utils/#project.utils.typing_utils.HydraConfigFor","title":"HydraConfigFor module-attribute","text":"
HydraConfigFor = Builds[type[T]]\n

Type annotation to say \"a hydra config that returns an object of type T when instantiated\".

"},{"location":"reference/project/utils/typing_utils/#project.utils.typing_utils.DataModule","title":"DataModule","text":"

Bases: Protocol[BatchType]

Protocol that shows the minimal attributes / methods of the LightningDataModule class.

This is used to type hint the batches that are yielded by the DataLoaders.

"},{"location":"reference/project/utils/typing_utils/#project.utils.typing_utils.is_sequence_of","title":"is_sequence_of","text":"
is_sequence_of(\n    object: Any, item_type: type[V] | tuple[type[V], ...]\n) -> TypeGuard[Sequence[V]]\n

Used to check (and tell the type checker) that object is a sequence of items of this type.
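
For example, a small sketch of narrowing an object's type:

from project.utils.typing_utils import is_sequence_of\n\ndef total_length(values: object) -> int:\n    # After this check, the type checker narrows `values` to Sequence[str].\n    if is_sequence_of(values, str):\n        return sum(len(v) for v in values)\n    raise TypeError(f\"Expected a sequence of strings, got {values!r}\")\n

is_mapping_of below can be used the same way to narrow a value to Mapping[K, V].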

"},{"location":"reference/project/utils/typing_utils/#project.utils.typing_utils.is_mapping_of","title":"is_mapping_of","text":"
is_mapping_of(\n    object: Any, key_type: type[K], value_type: type[V]\n) -> TypeGuard[Mapping[K, V]]\n

Used to check (and tell the type checker) that object is a mapping with keys and values of the given types.

"},{"location":"reference/project/utils/typing_utils/jax_typing_utils/","title":"Jax typing utils","text":"

Small typing helpers for Jax.

This makes jax.jit preserve the signature of the wrapped callable.

"},{"location":"reference/project/utils/typing_utils/jax_typing_utils/#project.utils.typing_utils.jax_typing_utils.field","title":"field","text":"
field(\n    *,\n    default: _T,\n    init: bool = True,\n    repr: bool = True,\n    hash: bool | None = None,\n    compare: bool = True,\n    metadata: Mapping[Any, Any] | None = None,\n    kw_only: bool = ...,\n    pytree_node: bool = True\n) -> _T\n
field(\n    *,\n    default_factory: Callable[[], _T],\n    init: bool = True,\n    repr: bool = True,\n    hash: bool | None = None,\n    compare: bool = True,\n    metadata: Mapping[Any, Any] | None = None,\n    kw_only: bool = ...,\n    pytree_node: bool = True\n) -> _T\n
field(\n    *,\n    init: bool = True,\n    repr: bool = True,\n    hash: bool | None = None,\n    compare: bool = True,\n    metadata: Mapping[Any, Any] | None = None,\n    kw_only: bool = ...,\n    pytree_node: bool = True\n) -> Any\n
field(\n    *,\n    default=MISSING,\n    default_factory=MISSING,\n    init=True,\n    repr=True,\n    hash=None,\n    compare=True,\n    metadata: Mapping[Any, Any] | None = None,\n    kw_only=MISSING,\n    pytree_node: bool | None = None\n)\n

Small typing fix for flax.struct.field.

  • Add type annotations so it doesn't drop the signature of the dataclasses.field function.
  • Make pytree_node default to False for ints and bools, and to True for everything else (see the sketch below).
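
For example, a hedged sketch assuming a flax.struct.dataclass (the MLPConfig class is hypothetical):

import flax.struct\n\nfrom project.utils.typing_utils.jax_typing_utils import field\n\n@flax.struct.dataclass\nclass MLPConfig:\n    # int field: pytree_node defaults to False, so it is treated as static metadata.\n    num_layers: int = field(default=3)\n    # float field: pytree_node defaults to True, so it is a pytree leaf.\n    dropout: float = field(default=0.1)\n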
"},{"location":"reference/project/utils/typing_utils/protocols/","title":"Protocols","text":""},{"location":"reference/project/utils/typing_utils/protocols/#project.utils.typing_utils.protocols.Module","title":"Module","text":"

Bases: Protocol[P, OutT]

Small protocol that can be used to annotate the input/output types of torch.nn.Modules.
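
For example, a hedged sketch (assuming the protocol is parametrized like Callable, with the argument types first and the output type last):

import torch\n\nfrom project.utils.typing_utils.protocols import Module\n\n# Annotates a network that maps one Tensor to one Tensor.\ndef forward_twice(network: Module[[torch.Tensor], torch.Tensor], x: torch.Tensor) -> torch.Tensor:\n    return network(network(x))\n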

"},{"location":"reference/project/utils/typing_utils/protocols/#project.utils.typing_utils.protocols.DataModule","title":"DataModule","text":"

Bases: Protocol[BatchType]

Protocol that shows the minimal attributes / methods of the LightningDataModule class.

This is used to type hint the batches that are yielded by the DataLoaders.

"},{"location":"reference/project/utils/typing_utils/protocols/#project.utils.typing_utils.protocols.ClassificationDataModule","title":"ClassificationDataModule","text":"

Bases: DataModule[BatchType], Protocol

Protocol that matches \"datamodules with a 'num_classes' int attribute.

"}]} \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml new file mode 100644 index 00000000..902a9d85 --- /dev/null +++ b/sitemap.xml @@ -0,0 +1,315 @@ + + + + https://mila-iqia.github.io/ResearchTemplate/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/SUMMARY/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/contributing/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/help/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/intro/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/resources/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/examples/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/examples/image_classification/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/examples/jax_image_classification/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/examples/jax_rl/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/examples/llm_finetuning/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/examples/profiling/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/examples/sweeps/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/examples/text_classification/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/features/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/features/auto_schema/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/features/jax/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/features/remote_slurm_launcher/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/features/testing/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/SUMMARY/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/conftest/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/experiment/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/main/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/main_test/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/algorithms/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/algorithms/image_classifier/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/algorithms/image_classifier_test/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/algorithms/jax_image_classifier/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/algorithms/jax_image_classifier_test/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/algorithms/jax_ppo/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/algorithms/jax_ppo_test/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/algorithms/llm_finetuning/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/algorithms/llm_finetuning_test/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/algorithms/no_op/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/algorithms/text_classifier/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/algorithms/text_classifier_test/ + 
2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/algorithms/callbacks/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/algorithms/callbacks/classification_metrics/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/algorithms/callbacks/samples_per_second/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/algorithms/testsuites/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/algorithms/testsuites/lightning_module_tests/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/configs/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/configs/config/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/configs/config_test/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/configs/algorithm/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/configs/algorithm/lr_scheduler/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/configs/algorithm/network/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/configs/algorithm/optimizer/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/configs/datamodule/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/datamodules/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/datamodules/datamodules_test/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/datamodules/vision/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/datamodules/image_classification/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/datamodules/image_classification/cifar10/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/datamodules/image_classification/fashion_mnist/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/datamodules/image_classification/image_classification/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/datamodules/image_classification/imagenet/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/datamodules/image_classification/inaturalist/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/datamodules/image_classification/inaturalist_test/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/datamodules/image_classification/mnist/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/datamodules/text/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/datamodules/text/text_classification/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/datamodules/text/text_classification_test/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/networks/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/networks/fcnet/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/trainers/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/trainers/jax_trainer/ + 2024-12-04 + + + 
https://mila-iqia.github.io/ResearchTemplate/reference/project/utils/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/utils/env_vars/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/utils/hydra_utils/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/utils/remote_launcher_plugin/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/utils/remote_launcher_plugin_test/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/utils/testutils/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/utils/utils/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/utils/typing_utils/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/utils/typing_utils/jax_typing_utils/ + 2024-12-04 + + + https://mila-iqia.github.io/ResearchTemplate/reference/project/utils/typing_utils/protocols/ + 2024-12-04 + + \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz new file mode 100644 index 00000000..5b7b1338 Binary files /dev/null and b/sitemap.xml.gz differ