diff --git a/2.6/404.html b/2.6/404.html new file mode 100644 index 0000000..2f71bc9 --- /dev/null +++ b/2.6/404.html @@ -0,0 +1 @@ + CARLISLE
\ No newline at end of file diff --git a/2.6/assets/images/favicon.png b/2.6/assets/images/favicon.png new file mode 100644 index 0000000..1cf13b9 Binary files /dev/null and b/2.6/assets/images/favicon.png differ diff --git a/2.6/assets/javascripts/bundle.51198bba.min.js b/2.6/assets/javascripts/bundle.51198bba.min.js new file mode 100644 index 0000000..31bd041 --- /dev/null +++ b/2.6/assets/javascripts/bundle.51198bba.min.js @@ -0,0 +1,29 @@ +"use strict";(()=>{var Ri=Object.create;var gr=Object.defineProperty;var ki=Object.getOwnPropertyDescriptor;var Hi=Object.getOwnPropertyNames,Ht=Object.getOwnPropertySymbols,Pi=Object.getPrototypeOf,yr=Object.prototype.hasOwnProperty,on=Object.prototype.propertyIsEnumerable;var nn=(e,t,r)=>t in e?gr(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,P=(e,t)=>{for(var r in t||(t={}))yr.call(t,r)&&nn(e,r,t[r]);if(Ht)for(var r of Ht(t))on.call(t,r)&&nn(e,r,t[r]);return e};var an=(e,t)=>{var r={};for(var n in e)yr.call(e,n)&&t.indexOf(n)<0&&(r[n]=e[n]);if(e!=null&&Ht)for(var n of Ht(e))t.indexOf(n)<0&&on.call(e,n)&&(r[n]=e[n]);return r};var Pt=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var $i=(e,t,r,n)=>{if(t&&typeof t=="object"||typeof t=="function")for(let o of Hi(t))!yr.call(e,o)&&o!==r&&gr(e,o,{get:()=>t[o],enumerable:!(n=ki(t,o))||n.enumerable});return e};var yt=(e,t,r)=>(r=e!=null?Ri(Pi(e)):{},$i(t||!e||!e.__esModule?gr(r,"default",{value:e,enumerable:!0}):r,e));var cn=Pt((xr,sn)=>{(function(e,t){typeof xr=="object"&&typeof sn!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(xr,function(){"use strict";function e(r){var n=!0,o=!1,i=null,s={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function a(T){return!!(T&&T!==document&&T.nodeName!=="HTML"&&T.nodeName!=="BODY"&&"classList"in T&&"contains"in T.classList)}function c(T){var 
Qe=T.type,De=T.tagName;return!!(De==="INPUT"&&s[Qe]&&!T.readOnly||De==="TEXTAREA"&&!T.readOnly||T.isContentEditable)}function f(T){T.classList.contains("focus-visible")||(T.classList.add("focus-visible"),T.setAttribute("data-focus-visible-added",""))}function u(T){T.hasAttribute("data-focus-visible-added")&&(T.classList.remove("focus-visible"),T.removeAttribute("data-focus-visible-added"))}function p(T){T.metaKey||T.altKey||T.ctrlKey||(a(r.activeElement)&&f(r.activeElement),n=!0)}function m(T){n=!1}function d(T){a(T.target)&&(n||c(T.target))&&f(T.target)}function h(T){a(T.target)&&(T.target.classList.contains("focus-visible")||T.target.hasAttribute("data-focus-visible-added"))&&(o=!0,window.clearTimeout(i),i=window.setTimeout(function(){o=!1},100),u(T.target))}function v(T){document.visibilityState==="hidden"&&(o&&(n=!0),G())}function G(){document.addEventListener("mousemove",N),document.addEventListener("mousedown",N),document.addEventListener("mouseup",N),document.addEventListener("pointermove",N),document.addEventListener("pointerdown",N),document.addEventListener("pointerup",N),document.addEventListener("touchmove",N),document.addEventListener("touchstart",N),document.addEventListener("touchend",N)}function oe(){document.removeEventListener("mousemove",N),document.removeEventListener("mousedown",N),document.removeEventListener("mouseup",N),document.removeEventListener("pointermove",N),document.removeEventListener("pointerdown",N),document.removeEventListener("pointerup",N),document.removeEventListener("touchmove",N),document.removeEventListener("touchstart",N),document.removeEventListener("touchend",N)}function 
N(T){T.target.nodeName&&T.target.nodeName.toLowerCase()==="html"||(n=!1,oe())}document.addEventListener("keydown",p,!0),document.addEventListener("mousedown",m,!0),document.addEventListener("pointerdown",m,!0),document.addEventListener("touchstart",m,!0),document.addEventListener("visibilitychange",v,!0),G(),r.addEventListener("focus",d,!0),r.addEventListener("blur",h,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var fn=Pt(Er=>{(function(e){var t=function(){try{return!!Symbol.iterator}catch(f){return!1}},r=t(),n=function(f){var u={next:function(){var p=f.shift();return{done:p===void 0,value:p}}};return r&&(u[Symbol.iterator]=function(){return u}),u},o=function(f){return encodeURIComponent(f).replace(/%20/g,"+")},i=function(f){return decodeURIComponent(String(f).replace(/\+/g," "))},s=function(){var f=function(p){Object.defineProperty(this,"_entries",{writable:!0,value:{}});var m=typeof p;if(m!=="undefined")if(m==="string")p!==""&&this._fromString(p);else if(p instanceof f){var d=this;p.forEach(function(oe,N){d.append(N,oe)})}else if(p!==null&&m==="object")if(Object.prototype.toString.call(p)==="[object Array]")for(var h=0;hd[0]?1:0}),f._entries&&(f._entries={});for(var p=0;p1?i(d[1]):"")}})})(typeof global!="undefined"?global:typeof window!="undefined"?window:typeof self!="undefined"?self:Er);(function(e){var t=function(){try{var o=new e.URL("b","http://a");return o.pathname="c 
d",o.href==="http://a/c%20d"&&o.searchParams}catch(i){return!1}},r=function(){var o=e.URL,i=function(c,f){typeof c!="string"&&(c=String(c)),f&&typeof f!="string"&&(f=String(f));var u=document,p;if(f&&(e.location===void 0||f!==e.location.href)){f=f.toLowerCase(),u=document.implementation.createHTMLDocument(""),p=u.createElement("base"),p.href=f,u.head.appendChild(p);try{if(p.href.indexOf(f)!==0)throw new Error(p.href)}catch(T){throw new Error("URL unable to set base "+f+" due to "+T)}}var m=u.createElement("a");m.href=c,p&&(u.body.appendChild(m),m.href=m.href);var d=u.createElement("input");if(d.type="url",d.value=c,m.protocol===":"||!/:/.test(m.href)||!d.checkValidity()&&!f)throw new TypeError("Invalid URL");Object.defineProperty(this,"_anchorElement",{value:m});var h=new e.URLSearchParams(this.search),v=!0,G=!0,oe=this;["append","delete","set"].forEach(function(T){var Qe=h[T];h[T]=function(){Qe.apply(h,arguments),v&&(G=!1,oe.search=h.toString(),G=!0)}}),Object.defineProperty(this,"searchParams",{value:h,enumerable:!0});var N=void 0;Object.defineProperty(this,"_updateSearchParams",{enumerable:!1,configurable:!1,writable:!1,value:function(){this.search!==N&&(N=this.search,G&&(v=!1,this.searchParams._fromString(this.search),v=!0))}})},s=i.prototype,a=function(c){Object.defineProperty(s,c,{get:function(){return this._anchorElement[c]},set:function(f){this._anchorElement[c]=f},enumerable:!0})};["hash","host","hostname","port","protocol"].forEach(function(c){a(c)}),Object.defineProperty(s,"search",{get:function(){return this._anchorElement.search},set:function(c){this._anchorElement.search=c,this._updateSearchParams()},enumerable:!0}),Object.defineProperties(s,{toString:{get:function(){var c=this;return function(){return c.href}}},href:{get:function(){return this._anchorElement.href.replace(/\?$/,"")},set:function(c){this._anchorElement.href=c,this._updateSearchParams()},enumerable:!0},pathname:{get:function(){return 
this._anchorElement.pathname.replace(/(^\/?)/,"/")},set:function(c){this._anchorElement.pathname=c},enumerable:!0},origin:{get:function(){var c={"http:":80,"https:":443,"ftp:":21}[this._anchorElement.protocol],f=this._anchorElement.port!=c&&this._anchorElement.port!=="";return this._anchorElement.protocol+"//"+this._anchorElement.hostname+(f?":"+this._anchorElement.port:"")},enumerable:!0},password:{get:function(){return""},set:function(c){},enumerable:!0},username:{get:function(){return""},set:function(c){},enumerable:!0}}),i.createObjectURL=function(c){return o.createObjectURL.apply(o,arguments)},i.revokeObjectURL=function(c){return o.revokeObjectURL.apply(o,arguments)},e.URL=i};if(t()||r(),e.location!==void 0&&!("origin"in e.location)){var n=function(){return e.location.protocol+"//"+e.location.hostname+(e.location.port?":"+e.location.port:"")};try{Object.defineProperty(e.location,"origin",{get:n,enumerable:!0})}catch(o){setInterval(function(){e.location.origin=n()},100)}}})(typeof global!="undefined"?global:typeof window!="undefined"?window:typeof self!="undefined"?self:Er)});var Kr=Pt((Mt,qr)=>{/*! 
+ * clipboard.js v2.0.11 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */(function(t,r){typeof Mt=="object"&&typeof qr=="object"?qr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof Mt=="object"?Mt.ClipboardJS=r():t.ClipboardJS=r()})(Mt,function(){return function(){var e={686:function(n,o,i){"use strict";i.d(o,{default:function(){return Ci}});var s=i(279),a=i.n(s),c=i(370),f=i.n(c),u=i(817),p=i.n(u);function m(j){try{return document.execCommand(j)}catch(O){return!1}}var d=function(O){var E=p()(O);return m("cut"),E},h=d;function v(j){var O=document.documentElement.getAttribute("dir")==="rtl",E=document.createElement("textarea");E.style.fontSize="12pt",E.style.border="0",E.style.padding="0",E.style.margin="0",E.style.position="absolute",E.style[O?"right":"left"]="-9999px";var H=window.pageYOffset||document.documentElement.scrollTop;return E.style.top="".concat(H,"px"),E.setAttribute("readonly",""),E.value=j,E}var G=function(O,E){var H=v(O);E.container.appendChild(H);var I=p()(H);return m("copy"),H.remove(),I},oe=function(O){var E=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},H="";return typeof O=="string"?H=G(O,E):O instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(O==null?void 0:O.type)?H=G(O.value,E):(H=p()(O),m("copy")),H},N=oe;function T(j){return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?T=function(E){return typeof E}:T=function(E){return E&&typeof Symbol=="function"&&E.constructor===Symbol&&E!==Symbol.prototype?"symbol":typeof E},T(j)}var Qe=function(){var O=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},E=O.action,H=E===void 0?"copy":E,I=O.container,q=O.target,Me=O.text;if(H!=="copy"&&H!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(q!==void 0)if(q&&T(q)==="object"&&q.nodeType===1){if(H==="copy"&&q.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. 
Please use "readonly" instead of "disabled" attribute');if(H==="cut"&&(q.hasAttribute("readonly")||q.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if(Me)return N(Me,{container:I});if(q)return H==="cut"?h(q):N(q,{container:I})},De=Qe;function $e(j){return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?$e=function(E){return typeof E}:$e=function(E){return E&&typeof Symbol=="function"&&E.constructor===Symbol&&E!==Symbol.prototype?"symbol":typeof E},$e(j)}function wi(j,O){if(!(j instanceof O))throw new TypeError("Cannot call a class as a function")}function rn(j,O){for(var E=0;E0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof I.action=="function"?I.action:this.defaultAction,this.target=typeof I.target=="function"?I.target:this.defaultTarget,this.text=typeof I.text=="function"?I.text:this.defaultText,this.container=$e(I.container)==="object"?I.container:document.body}},{key:"listenClick",value:function(I){var q=this;this.listener=f()(I,"click",function(Me){return q.onClick(Me)})}},{key:"onClick",value:function(I){var q=I.delegateTarget||I.currentTarget,Me=this.action(q)||"copy",kt=De({action:Me,container:this.container,target:this.target(q),text:this.text(q)});this.emit(kt?"success":"error",{action:Me,text:kt,trigger:q,clearSelection:function(){q&&q.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(I){return vr("action",I)}},{key:"defaultTarget",value:function(I){var q=vr("target",I);if(q)return document.querySelector(q)}},{key:"defaultText",value:function(I){return vr("text",I)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(I){var q=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return N(I,q)}},{key:"cut",value:function(I){return 
h(I)}},{key:"isSupported",value:function(){var I=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],q=typeof I=="string"?[I]:I,Me=!!document.queryCommandSupported;return q.forEach(function(kt){Me=Me&&!!document.queryCommandSupported(kt)}),Me}}]),E}(a()),Ci=Ai},828:function(n){var o=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function s(a,c){for(;a&&a.nodeType!==o;){if(typeof a.matches=="function"&&a.matches(c))return a;a=a.parentNode}}n.exports=s},438:function(n,o,i){var s=i(828);function a(u,p,m,d,h){var v=f.apply(this,arguments);return u.addEventListener(m,v,h),{destroy:function(){u.removeEventListener(m,v,h)}}}function c(u,p,m,d,h){return typeof u.addEventListener=="function"?a.apply(null,arguments):typeof m=="function"?a.bind(null,document).apply(null,arguments):(typeof u=="string"&&(u=document.querySelectorAll(u)),Array.prototype.map.call(u,function(v){return a(v,p,m,d,h)}))}function f(u,p,m,d){return function(h){h.delegateTarget=s(h.target,p),h.delegateTarget&&d.call(u,h)}}n.exports=c},879:function(n,o){o.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},o.nodeList=function(i){var s=Object.prototype.toString.call(i);return i!==void 0&&(s==="[object NodeList]"||s==="[object HTMLCollection]")&&"length"in i&&(i.length===0||o.node(i[0]))},o.string=function(i){return typeof i=="string"||i instanceof String},o.fn=function(i){var s=Object.prototype.toString.call(i);return s==="[object Function]"}},370:function(n,o,i){var s=i(879),a=i(438);function c(m,d,h){if(!m&&!d&&!h)throw new Error("Missing required arguments");if(!s.string(d))throw new TypeError("Second argument must be a String");if(!s.fn(h))throw new TypeError("Third argument must be a Function");if(s.node(m))return f(m,d,h);if(s.nodeList(m))return u(m,d,h);if(s.string(m))return p(m,d,h);throw new 
TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function f(m,d,h){return m.addEventListener(d,h),{destroy:function(){m.removeEventListener(d,h)}}}function u(m,d,h){return Array.prototype.forEach.call(m,function(v){v.addEventListener(d,h)}),{destroy:function(){Array.prototype.forEach.call(m,function(v){v.removeEventListener(d,h)})}}}function p(m,d,h){return a(document.body,m,d,h)}n.exports=c},817:function(n){function o(i){var s;if(i.nodeName==="SELECT")i.focus(),s=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var a=i.hasAttribute("readonly");a||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),a||i.removeAttribute("readonly"),s=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var c=window.getSelection(),f=document.createRange();f.selectNodeContents(i),c.removeAllRanges(),c.addRange(f),s=c.toString()}return s}n.exports=o},279:function(n){function o(){}o.prototype={on:function(i,s,a){var c=this.e||(this.e={});return(c[i]||(c[i]=[])).push({fn:s,ctx:a}),this},once:function(i,s,a){var c=this;function f(){c.off(i,f),s.apply(a,arguments)}return f._=s,this.on(i,f,a)},emit:function(i){var s=[].slice.call(arguments,1),a=((this.e||(this.e={}))[i]||[]).slice(),c=0,f=a.length;for(c;c{"use strict";/*! 
+ * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */var ns=/["'&<>]/;Go.exports=os;function os(e){var t=""+e,r=ns.exec(t);if(!r)return t;var n,o="",i=0,s=0;for(i=r.index;i0&&i[i.length-1])&&(f[0]===6||f[0]===2)){r=0;continue}if(f[0]===3&&(!i||f[1]>i[0]&&f[1]=e.length&&(e=void 0),{value:e&&e[n++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function W(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var n=r.call(e),o,i=[],s;try{for(;(t===void 0||t-- >0)&&!(o=n.next()).done;)i.push(o.value)}catch(a){s={error:a}}finally{try{o&&!o.done&&(r=n.return)&&r.call(n)}finally{if(s)throw s.error}}return i}function D(e,t,r){if(r||arguments.length===2)for(var n=0,o=t.length,i;n1||a(m,d)})})}function a(m,d){try{c(n[m](d))}catch(h){p(i[0][3],h)}}function c(m){m.value instanceof et?Promise.resolve(m.value.v).then(f,u):p(i[0][2],m)}function f(m){a("next",m)}function u(m){a("throw",m)}function p(m,d){m(d),i.shift(),i.length&&a(i[0][0],i[0][1])}}function ln(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof Ee=="function"?Ee(e):e[Symbol.iterator](),r={},n("next"),n("throw"),n("return"),r[Symbol.asyncIterator]=function(){return this},r);function n(i){r[i]=e[i]&&function(s){return new Promise(function(a,c){s=e[i](s),o(a,c,s.done,s.value)})}}function o(i,s,a,c){Promise.resolve(c).then(function(f){i({value:f,done:a})},s)}}function C(e){return typeof e=="function"}function at(e){var t=function(n){Error.call(n),n.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var It=at(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription: +`+r.map(function(n,o){return o+1+") "+n.toString()}).join(` + 
`):"",this.name="UnsubscriptionError",this.errors=r}});function Ve(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var Ie=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var t,r,n,o,i;if(!this.closed){this.closed=!0;var s=this._parentage;if(s)if(this._parentage=null,Array.isArray(s))try{for(var a=Ee(s),c=a.next();!c.done;c=a.next()){var f=c.value;f.remove(this)}}catch(v){t={error:v}}finally{try{c&&!c.done&&(r=a.return)&&r.call(a)}finally{if(t)throw t.error}}else s.remove(this);var u=this.initialTeardown;if(C(u))try{u()}catch(v){i=v instanceof It?v.errors:[v]}var p=this._finalizers;if(p){this._finalizers=null;try{for(var m=Ee(p),d=m.next();!d.done;d=m.next()){var h=d.value;try{mn(h)}catch(v){i=i!=null?i:[],v instanceof It?i=D(D([],W(i)),W(v.errors)):i.push(v)}}}catch(v){n={error:v}}finally{try{d&&!d.done&&(o=m.return)&&o.call(m)}finally{if(n)throw n.error}}}if(i)throw new It(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)mn(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&Ve(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&Ve(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Sr=Ie.EMPTY;function jt(e){return e instanceof Ie||e&&"closed"in e&&C(e.remove)&&C(e.add)&&C(e.unsubscribe)}function mn(e){C(e)?e():e.unsubscribe()}var Le={onUnhandledError:null,onStoppedNotification:null,Promise:void 
0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var st={setTimeout:function(e,t){for(var r=[],n=2;n0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var n=this,o=this,i=o.hasError,s=o.isStopped,a=o.observers;return i||s?Sr:(this.currentObservers=null,a.push(r),new Ie(function(){n.currentObservers=null,Ve(a,r)}))},t.prototype._checkFinalizedStatuses=function(r){var n=this,o=n.hasError,i=n.thrownError,s=n.isStopped;o?r.error(i):s&&r.complete()},t.prototype.asObservable=function(){var r=new F;return r.source=this,r},t.create=function(r,n){return new En(r,n)},t}(F);var En=function(e){ie(t,e);function t(r,n){var o=e.call(this)||this;return o.destination=r,o.source=n,o}return t.prototype.next=function(r){var n,o;(o=(n=this.destination)===null||n===void 0?void 0:n.next)===null||o===void 0||o.call(n,r)},t.prototype.error=function(r){var n,o;(o=(n=this.destination)===null||n===void 0?void 0:n.error)===null||o===void 0||o.call(n,r)},t.prototype.complete=function(){var r,n;(n=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||n===void 0||n.call(r)},t.prototype._subscribe=function(r){var n,o;return(o=(n=this.source)===null||n===void 0?void 0:n.subscribe(r))!==null&&o!==void 0?o:Sr},t}(x);var Et={now:function(){return(Et.delegate||Date).now()},delegate:void 0};var wt=function(e){ie(t,e);function t(r,n,o){r===void 0&&(r=1/0),n===void 0&&(n=1/0),o===void 0&&(o=Et);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=n,i._timestampProvider=o,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=n===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,n),i}return t.prototype.next=function(r){var 
n=this,o=n.isStopped,i=n._buffer,s=n._infiniteTimeWindow,a=n._timestampProvider,c=n._windowTime;o||(i.push(r),!s&&i.push(a.now()+c)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var n=this._innerSubscribe(r),o=this,i=o._infiniteTimeWindow,s=o._buffer,a=s.slice(),c=0;c0?e.prototype.requestAsyncId.call(this,r,n,o):(r.actions.push(this),r._scheduled||(r._scheduled=ut.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,n,o){var i;if(o===void 0&&(o=0),o!=null?o>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,n,o);var s=r.actions;n!=null&&((i=s[s.length-1])===null||i===void 0?void 0:i.id)!==n&&(ut.cancelAnimationFrame(n),r._scheduled=void 0)},t}(Wt);var Tn=function(e){ie(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var n=this._scheduled;this._scheduled=void 0;var o=this.actions,i;r=r||o.shift();do if(i=r.execute(r.state,r.delay))break;while((r=o[0])&&r.id===n&&o.shift());if(this._active=!1,i){for(;(r=o[0])&&r.id===n&&o.shift();)r.unsubscribe();throw i}},t}(Dt);var Te=new Tn(Sn);var _=new F(function(e){return e.complete()});function Vt(e){return e&&C(e.schedule)}function Cr(e){return e[e.length-1]}function Ye(e){return C(Cr(e))?e.pop():void 0}function Oe(e){return Vt(Cr(e))?e.pop():void 0}function zt(e,t){return typeof Cr(e)=="number"?e.pop():t}var pt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function Nt(e){return C(e==null?void 0:e.then)}function qt(e){return C(e[ft])}function Kt(e){return Symbol.asyncIterator&&C(e==null?void 0:e[Symbol.asyncIterator])}function Qt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function Ni(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var Yt=Ni();function Gt(e){return C(e==null?void 0:e[Yt])}function Bt(e){return pn(this,arguments,function(){var r,n,o,i;return $t(this,function(s){switch(s.label){case 0:r=e.getReader(),s.label=1;case 1:s.trys.push([1,,9,10]),s.label=2;case 2:return[4,et(r.read())];case 3:return n=s.sent(),o=n.value,i=n.done,i?[4,et(void 0)]:[3,5];case 4:return[2,s.sent()];case 5:return[4,et(o)];case 6:return[4,s.sent()];case 7:return s.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function Jt(e){return C(e==null?void 0:e.getReader)}function U(e){if(e instanceof F)return e;if(e!=null){if(qt(e))return qi(e);if(pt(e))return Ki(e);if(Nt(e))return Qi(e);if(Kt(e))return On(e);if(Gt(e))return Yi(e);if(Jt(e))return Gi(e)}throw Qt(e)}function qi(e){return new F(function(t){var r=e[ft]();if(C(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function Ki(e){return new F(function(t){for(var r=0;r=2;return function(n){return n.pipe(e?A(function(o,i){return e(o,i,n)}):de,ge(1),r?He(t):Vn(function(){return new Zt}))}}function zn(){for(var e=[],t=0;t=2,!0))}function pe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new x}:t,n=e.resetOnError,o=n===void 0?!0:n,i=e.resetOnComplete,s=i===void 0?!0:i,a=e.resetOnRefCountZero,c=a===void 0?!0:a;return function(f){var u,p,m,d=0,h=!1,v=!1,G=function(){p==null||p.unsubscribe(),p=void 0},oe=function(){G(),u=m=void 0,h=v=!1},N=function(){var T=u;oe(),T==null||T.unsubscribe()};return y(function(T,Qe){d++,!v&&!h&&G();var De=m=m!=null?m:r();Qe.add(function(){d--,d===0&&!v&&!h&&(p=$r(N,c))}),De.subscribe(Qe),!u&&d>0&&(u=new rt({next:function($e){return 
De.next($e)},error:function($e){v=!0,G(),p=$r(oe,o,$e),De.error($e)},complete:function(){h=!0,G(),p=$r(oe,s),De.complete()}}),U(T).subscribe(u))})(f)}}function $r(e,t){for(var r=[],n=2;ne.next(document)),e}function K(e,t=document){return Array.from(t.querySelectorAll(e))}function z(e,t=document){let r=ce(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function ce(e,t=document){return t.querySelector(e)||void 0}function _e(){return document.activeElement instanceof HTMLElement&&document.activeElement||void 0}function tr(e){return L(b(document.body,"focusin"),b(document.body,"focusout")).pipe(ke(1),l(()=>{let t=_e();return typeof t!="undefined"?e.contains(t):!1}),V(e===_e()),B())}function Xe(e){return{x:e.offsetLeft,y:e.offsetTop}}function Qn(e){return L(b(window,"load"),b(window,"resize")).pipe(Ce(0,Te),l(()=>Xe(e)),V(Xe(e)))}function rr(e){return{x:e.scrollLeft,y:e.scrollTop}}function dt(e){return L(b(e,"scroll"),b(window,"resize")).pipe(Ce(0,Te),l(()=>rr(e)),V(rr(e)))}var Gn=function(){if(typeof Map!="undefined")return Map;function e(t,r){var n=-1;return t.some(function(o,i){return o[0]===r?(n=i,!0):!1}),n}return function(){function t(){this.__entries__=[]}return Object.defineProperty(t.prototype,"size",{get:function(){return this.__entries__.length},enumerable:!0,configurable:!0}),t.prototype.get=function(r){var n=e(this.__entries__,r),o=this.__entries__[n];return o&&o[1]},t.prototype.set=function(r,n){var o=e(this.__entries__,r);~o?this.__entries__[o][1]=n:this.__entries__.push([r,n])},t.prototype.delete=function(r){var n=this.__entries__,o=e(n,r);~o&&n.splice(o,1)},t.prototype.has=function(r){return!!~e(this.__entries__,r)},t.prototype.clear=function(){this.__entries__.splice(0)},t.prototype.forEach=function(r,n){n===void 0&&(n=null);for(var 
o=0,i=this.__entries__;o0},e.prototype.connect_=function(){!Dr||this.connected_||(document.addEventListener("transitionend",this.onTransitionEnd_),window.addEventListener("resize",this.refresh),ga?(this.mutationsObserver_=new MutationObserver(this.refresh),this.mutationsObserver_.observe(document,{attributes:!0,childList:!0,characterData:!0,subtree:!0})):(document.addEventListener("DOMSubtreeModified",this.refresh),this.mutationEventsAdded_=!0),this.connected_=!0)},e.prototype.disconnect_=function(){!Dr||!this.connected_||(document.removeEventListener("transitionend",this.onTransitionEnd_),window.removeEventListener("resize",this.refresh),this.mutationsObserver_&&this.mutationsObserver_.disconnect(),this.mutationEventsAdded_&&document.removeEventListener("DOMSubtreeModified",this.refresh),this.mutationsObserver_=null,this.mutationEventsAdded_=!1,this.connected_=!1)},e.prototype.onTransitionEnd_=function(t){var r=t.propertyName,n=r===void 0?"":r,o=va.some(function(i){return!!~n.indexOf(i)});o&&this.refresh()},e.getInstance=function(){return this.instance_||(this.instance_=new e),this.instance_},e.instance_=null,e}(),Bn=function(e,t){for(var r=0,n=Object.keys(t);r0},e}(),Xn=typeof WeakMap!="undefined"?new WeakMap:new Gn,Zn=function(){function e(t){if(!(this instanceof e))throw new TypeError("Cannot call a class as a function.");if(!arguments.length)throw new TypeError("1 argument required, but only 0 present.");var r=ya.getInstance(),n=new Aa(t,r,this);Xn.set(this,n)}return e}();["observe","unobserve","disconnect"].forEach(function(e){Zn.prototype[e]=function(){var t;return(t=Xn.get(this))[e].apply(t,arguments)}});var Ca=function(){return typeof nr.ResizeObserver!="undefined"?nr.ResizeObserver:Zn}(),eo=Ca;var to=new x,Ra=$(()=>k(new eo(e=>{for(let t of e)to.next(t)}))).pipe(g(e=>L(ze,k(e)).pipe(R(()=>e.disconnect()))),J(1));function he(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ye(e){return 
Ra.pipe(S(t=>t.observe(e)),g(t=>to.pipe(A(({target:r})=>r===e),R(()=>t.unobserve(e)),l(()=>he(e)))),V(he(e)))}function bt(e){return{width:e.scrollWidth,height:e.scrollHeight}}function ar(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}var ro=new x,ka=$(()=>k(new IntersectionObserver(e=>{for(let t of e)ro.next(t)},{threshold:0}))).pipe(g(e=>L(ze,k(e)).pipe(R(()=>e.disconnect()))),J(1));function sr(e){return ka.pipe(S(t=>t.observe(e)),g(t=>ro.pipe(A(({target:r})=>r===e),R(()=>t.unobserve(e)),l(({isIntersecting:r})=>r))))}function no(e,t=16){return dt(e).pipe(l(({y:r})=>{let n=he(e),o=bt(e);return r>=o.height-n.height-t}),B())}var cr={drawer:z("[data-md-toggle=drawer]"),search:z("[data-md-toggle=search]")};function oo(e){return cr[e].checked}function Ke(e,t){cr[e].checked!==t&&cr[e].click()}function Ue(e){let t=cr[e];return b(t,"change").pipe(l(()=>t.checked),V(t.checked))}function Ha(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function Pa(){return L(b(window,"compositionstart").pipe(l(()=>!0)),b(window,"compositionend").pipe(l(()=>!1))).pipe(V(!1))}function io(){let e=b(window,"keydown").pipe(A(t=>!(t.metaKey||t.ctrlKey)),l(t=>({mode:oo("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),A(({mode:t,type:r})=>{if(t==="global"){let n=_e();if(typeof n!="undefined")return!Ha(n,r)}return!0}),pe());return Pa().pipe(g(t=>t?_:e))}function le(){return new URL(location.href)}function ot(e){location.href=e.href}function ao(){return new x}function so(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)so(e,r)}function M(e,t,...r){let n=document.createElement(e);if(t)for(let o of Object.keys(t))typeof 
t[o]!="undefined"&&(typeof t[o]!="boolean"?n.setAttribute(o,t[o]):n.setAttribute(o,""));for(let o of r)so(n,o);return n}function fr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function co(){return location.hash.substring(1)}function Vr(e){let t=M("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function $a(e){return L(b(window,"hashchange"),e).pipe(l(co),V(co()),A(t=>t.length>0),J(1))}function fo(e){return $a(e).pipe(l(t=>ce(`[id="${t}"]`)),A(t=>typeof t!="undefined"))}function zr(e){let t=matchMedia(e);return er(r=>t.addListener(()=>r(t.matches))).pipe(V(t.matches))}function uo(){let e=matchMedia("print");return L(b(window,"beforeprint").pipe(l(()=>!0)),b(window,"afterprint").pipe(l(()=>!1))).pipe(V(e.matches))}function Nr(e,t){return e.pipe(g(r=>r?t():_))}function ur(e,t={credentials:"same-origin"}){return ue(fetch(`${e}`,t)).pipe(fe(()=>_),g(r=>r.status!==200?Tt(()=>new Error(r.statusText)):k(r)))}function We(e,t){return ur(e,t).pipe(g(r=>r.json()),J(1))}function po(e,t){let r=new DOMParser;return ur(e,t).pipe(g(n=>n.text()),l(n=>r.parseFromString(n,"text/xml")),J(1))}function pr(e){let t=M("script",{src:e});return $(()=>(document.head.appendChild(t),L(b(t,"load"),b(t,"error").pipe(g(()=>Tt(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(l(()=>{}),R(()=>document.head.removeChild(t)),ge(1))))}function lo(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function mo(){return L(b(window,"scroll",{passive:!0}),b(window,"resize",{passive:!0})).pipe(l(lo),V(lo()))}function ho(){return{width:innerWidth,height:innerHeight}}function bo(){return b(window,"resize",{passive:!0}).pipe(l(ho),V(ho()))}function vo(){return Q([mo(),bo()]).pipe(l(([e,t])=>({offset:e,size:t})),J(1))}function lr(e,{viewport$:t,header$:r}){let n=t.pipe(Z("size")),o=Q([n,r]).pipe(l(()=>Xe(e)));return 
Q([r,t,o]).pipe(l(([{height:i},{offset:s,size:a},{x:c,y:f}])=>({offset:{x:s.x-c,y:s.y-f+i},size:a})))}(()=>{function e(n,o){parent.postMessage(n,o||"*")}function t(...n){return n.reduce((o,i)=>o.then(()=>new Promise(s=>{let a=document.createElement("script");a.src=i,a.onload=s,document.body.appendChild(a)})),Promise.resolve())}var r=class extends EventTarget{constructor(n){super(),this.url=n,this.m=i=>{i.source===this.w&&(this.dispatchEvent(new MessageEvent("message",{data:i.data})),this.onmessage&&this.onmessage(i))},this.e=(i,s,a,c,f)=>{if(s===`${this.url}`){let u=new ErrorEvent("error",{message:i,filename:s,lineno:a,colno:c,error:f});this.dispatchEvent(u),this.onerror&&this.onerror(u)}};let o=document.createElement("iframe");o.hidden=!0,document.body.appendChild(this.iframe=o),this.w.document.open(),this.w.document.write(`

Changelog

CARLISLE 2.6.0

Bug fixes

  • Bug fixes for DESeq (#127, @epehrsson)
  • Removes single-sample group check for DESeq.
  • Increases memory for DESeq.
  • Ensures control replicate number is an integer.
  • Fixes FDR cutoff misassigned to log2FC cutoff.
  • Fixes no_dedup variable names in library normalization scripts.
  • Fix bug that added nonexistent directories to the singularity bind paths. (#135, @kelly-sovacool)
  • Containerize rules that require R (deseq, go_enrichment, and spikein_assessment) to fix installation issues with common R library path. (#129, @kelly-sovacool)
    • The Rlib_dir and Rpkg_config config options have been removed as they are no longer needed.

New features

  • New visualizations: (#132, @epehrsson)
  • New rules cov_correlation, homer_enrich, combine_homer, count_peaks
  • Add peak caller to MACS2 peak xls filename
  • New parameters in the config file to make certain rules optional: (#133, @kelly-sovacool)
  • GO enrichment is controlled by run_go_enrichment (default: false)
  • ROSE is controlled by run_rose (default: false)
  • New --singcache argument to provide a singularity cache dir location. The singularity cache dir is automatically set inside /data/$USER/ or $WORKDIR/ if --singcache is not provided. (#143, @kelly-sovacool)

Misc

CARLISLE v2.5.0

  • Refactors R packages to a common source location (#118, @slsevilla)
  • Adds a --force flag to allow for re-initialization of a workdir (#97, @slsevilla)
  • Fixes error with testrun in DESEQ2 (#113, @slsevilla)
  • Decreases the number of samples being run with testrun, essentially running tinytest as default and removing tinytest as an option (#115, @slsevilla)
  • Reads version from VERSION file instead of github repo link (#96, #112, @slsevilla)
  • Added a CHANGELOG (#116, @slsevilla)
  • Fix: RNA report bug, caused by hard-coding of PC1-3, when only PC1-2 were generated (#104, @slsevilla)
  • Minor documentation improvements. (#100, @kelly-sovacool)
  • Fix: allow printing the version or help message even if singularity is not in the path. (#110, @kelly-sovacool)

CARLISLE v2.4.1

CARLISLE v2.4.0

  • Feature- Merged Yue's fork, adding DEEPTOOLS by @slsevilla in #85
  • Feature- Added tracking features from SPOOK by @slsevilla in #88
  • Feature - Dev test run completed by @slsevilla in #89
  • Bug - Fixed bugs related to Biowulf transition

CARLISLE v2.1.0

  • enhancement
  • update gopeaks resources
  • change SEACR to run "norm" without spikein controls, "non" with spikein controls
  • update docs for changes; provide extra troubleshooting guidance
  • fix GoEnrich bug for failed plots

CARLISLE v2.0.1

  • fix error when contrasts set to "N"
  • adjust goenrich resources to be more efficient

CARLISLE 2.0.0

  • Add a MAPQ filter to samtools (rule align)
  • Add GoPeaks MultiQC module
  • Allow for library normalization to occur during first pass
  • Add --broad-cutoff to MACS2 broad peak calling for MACS2
  • Create a spike in QC report
  • Reorganize file structure to help with qthreshold folder
  • Update variable names of all peak callers
  • Merge rules with input/output/wildcard congruency
  • Convert the "spiked" variable to "norm_method"
  • Add name of control used to MACS2 peaks
  • Running extra control:sample comparisons that are not needed
  • improved resource allocation
  • test data originally included 1475 jobs, this version includes 1087 jobs (reduction of 25%) despite including additional features
  • moved ~12% of all jobs to local deployment (within SLURM submission)

CARLISLE 1.2.0

  • merge increases to resources; update workflow img, contributions

CARLISLE 1.1.1

  • patch for gz bigbed bug

CARLISLE 1.1.0

  • add broad-cutoff to macs2 broad peaks param settings
  • add non.stringent and non.relaxed to annotation options
  • merge DESEQ and DESEQ2 rules together
  • identify some files as temp

CARLISLE 1.0.1

  • contains patch for DESEQ error with non hs1 reference samples

Last update: 2024-09-12
\ No newline at end of file diff --git a/2.6/contributing/index.html b/2.6/contributing/index.html new file mode 100644 index 0000000..47b4438 --- /dev/null +++ b/2.6/contributing/index.html @@ -0,0 +1,18 @@ + Contributing to CARLISLE - CARLISLE

Contributing to CARLISLE

Proposing changes with issues

If you want to make a change, it's a good idea to first open an issue and make sure someone from the team agrees that it’s needed.

If you've decided to work on an issue, assign yourself to the issue so others will know you're working on it.

Pull request process

We use GitHub Flow as our collaboration process. Follow the steps below for detailed instructions on contributing changes to CARLISLE.

GitHub Flow diagram

Clone the repo

If you are a member of CCBR, you can clone this repository to your computer or development environment. Otherwise, you will first need to fork the repo and clone your fork. You only need to do this step once.

git clone https://github.com/CCBR/CARLISLE
+

Cloning into 'CARLISLE'...
remote: Enumerating objects: 1136, done.
remote: Counting objects: 100% (463/463), done.
remote: Compressing objects: 100% (357/357), done.
remote: Total 1136 (delta 149), reused 332 (delta 103), pack-reused 673
Receiving objects: 100% (1136/1136), 11.01 MiB | 9.76 MiB/s, done.
Resolving deltas: 100% (530/530), done.

cd CARLISLE
+

If this is your first time cloning the repo, you may need to install dependencies

  • Install snakemake and singularity or docker if needed (biowulf already has these available as modules).

  • Install the python dependencies with pip

pip install .
+

If you're developing on biowulf, you can use our shared conda environment which already has these dependencies installed

. "/data/CCBR_Pipeliner/db/PipeDB/Conda/etc/profile.d/conda.sh"
+conda activate py311
+
  • Install pre-commit if you don't already have it. Then from the repo's root directory, run
pre-commit install
+

This will install the repo's pre-commit hooks. You'll only need to do this step the first time you clone the repo.

Create a branch

Create a Git branch for your pull request (PR). Give the branch a descriptive name for the changes you will make, such as iss-10 if it is for a specific issue.

# create a new branch and switch to it
+git branch iss-10
+git switch iss-10
+

Switched to a new branch 'iss-10'

Make your changes

Edit the code, write and run tests, and update the documentation as needed.

test

Changes to the python package code will also need unit tests to demonstrate that the changes work as intended. We write unit tests with pytest and store them in the tests/ subdirectory. Run the tests with python -m pytest.

If you change the workflow, please run the workflow with the test profile and make sure your new feature or bug fix works as intended.

document

If you have added a new feature or changed the API of an existing feature, you will likely need to update the documentation in docs/.

Commit and push your changes

If you're not sure how often you should commit or what your commits should consist of, we recommend following the "atomic commits" principle where each commit contains one new feature, fix, or task. Learn more about atomic commits here: https://www.freshconsulting.com/insights/blog/atomic-commits/

First, add the files that you changed to the staging area:

git add path/to/changed/files/
+

Then make the commit. Your commit message should follow the Conventional Commits specification. Briefly, each commit should start with one of the approved types such as feat, fix, docs, etc. followed by a description of the commit. Take a look at the Conventional Commits specification for more detailed information about how to write commit messages.

git commit -m 'feat: create function for awesome feature'
+

pre-commit will enforce that your commit message and the code changes are styled correctly and will attempt to make corrections if needed.

Check for added large files..............................................Passed
Fix End of Files.........................................................Passed
Trim Trailing Whitespace.................................................Failed

  • hook id: trailing-whitespace
  • exit code: 1
  • files were modified by this hook
    >
    Fixing path/to/changed/files/file.txt
    >
    codespell................................................................Passed
    style-files..........................................(no files to check)Skipped
    readme-rmd-rendered..................................(no files to check)Skipped
    use-tidy-description.................................(no files to check)Skipped

In the example above, one of the hooks modified a file in the proposed commit, so the pre-commit check failed. You can run git diff to see the changes that pre-commit made and git status to see which files were modified. To proceed with the commit, re-add the modified file(s) and re-run the commit command:

git add path/to/changed/files/file.txt
+git commit -m 'feat: create function for awesome feature'
+

This time, all the hooks either passed or were skipped (e.g. hooks that only run on R code will not run if no R files were committed). When the pre-commit check is successful, the usual commit success message will appear after the pre-commit messages showing that the commit was created.

Check for added large files..............................................Passed
Fix End of Files.........................................................Passed
Trim Trailing Whitespace.................................................Passed
codespell................................................................Passed
style-files..........................................(no files to check)Skipped
readme-rmd-rendered..................................(no files to check)Skipped
use-tidy-description.................................(no files to check)Skipped
Conventional Commit......................................................Passed
> [iss-10 9ff256e] feat: create function for awesome feature
1 file changed, 22 insertions(+), 3 deletions(-)

Finally, push your changes to GitHub:

git push
+

If this is the first time you are pushing this branch, you may have to explicitly set the upstream branch:

git push --set-upstream origin iss-10
+

Enumerating objects: 7, done.
Counting objects: 100% (7/7), done.
Delta compression using up to 10 threads
Compressing objects: 100% (4/4), done.
Writing objects: 100% (4/4), 648 bytes | 648.00 KiB/s, done.
Total 4 (delta 3), reused 0 (delta 0), pack-reused 0
remote: Resolving deltas: 100% (3/3), completed with 3 local objects.
remote:
remote: Create a pull request for 'iss-10' on GitHub by visiting:
remote: https://github.com/CCBR/CARLISLE/pull/new/iss-10
remote:
To https://github.com/CCBR/CARLISLE
>
> [new branch] iss-10 -> iss-10
branch 'iss-10' set up to track 'origin/iss-10'.

We recommend pushing your commits often so they will be backed up on GitHub. You can view the files in your branch on GitHub at https://github.com/CCBR/CARLISLE/tree/<your-branch-name> (replace <your-branch-name> with the actual name of your branch).

Create the PR

Once your branch is ready, create a PR on GitHub: https://github.com/CCBR/CARLISLE/pull/new/

Select the branch you just pushed:

Create a new PR from your branch

Edit the PR title and description. The title should briefly describe the change. Follow the comments in the template to fill out the body of the PR, and you can delete the comments (everything between <!-- and -->) as you go. Be sure to fill out the checklist, checking off items as you complete them or striking through any irrelevant items. When you're ready, click 'Create pull request' to open it.

Open the PR after editing the title and description

Optionally, you can mark the PR as a draft if you're not yet ready for it to be reviewed, then change it later when you're ready.

Wait for a maintainer to review your PR

We will do our best to follow the tidyverse code review principles: https://code-review.tidyverse.org/. The reviewer may suggest that you make changes before accepting your PR in order to improve the code quality or style. If that's the case, continue to make changes in your branch and push them to GitHub, and they will appear in the PR.

Once the PR is approved, the maintainer will merge it and the issue(s) the PR links will close automatically. Congratulations and thank you for your contribution!

After your PR has been merged

After your PR has been merged, update your local clone of the repo by switching to the main branch and pulling the latest changes:

git checkout main
+git pull
+

It's a good idea to run git pull before creating a new branch so it will start from the most recent commits in main.


Last update: 2024-09-12
\ No newline at end of file diff --git a/2.6/img/CUTandRUN_Workflow.jpeg b/2.6/img/CUTandRUN_Workflow.jpeg new file mode 100644 index 0000000..c080358 Binary files /dev/null and b/2.6/img/CUTandRUN_Workflow.jpeg differ diff --git a/2.6/index.html b/2.6/index.html new file mode 100644 index 0000000..3d6a5be --- /dev/null +++ b/2.6/index.html @@ -0,0 +1 @@ + CARLISLE
\ No newline at end of file diff --git a/2.6/requirements.txt b/2.6/requirements.txt new file mode 100644 index 0000000..9d5ddac --- /dev/null +++ b/2.6/requirements.txt @@ -0,0 +1,10 @@ +#https://pypi.org/project/mkdocs-git-revision-date-localized-plugin/ +mkdocs-git-revision-date-localized-plugin==1.2.0 +#https://pypi.org/project/mkdocs-minify-plugin/ +mkdocs-minify-plugin==0.6.4 +#https://pypi.org/project/mkdocs-git-revision-date-plugin/ +mkdocs-git-revision-date-plugin==0.3.2 +#https://pypi.org/project/mkdocs-material/ +mkdocs-material==9.1.6 +#https://pypi.org/project/mkdocs-material-extensions/ +mkdocs-material-extensions==1.1.1 diff --git a/2.6/search/search_index.json b/2.6/search/search_index.json new file mode 100644 index 0000000..3c2c94d --- /dev/null +++ b/2.6/search/search_index.json @@ -0,0 +1 @@ +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Home","text":""},{"location":"#carlisle","title":"CARLISLE","text":"

Cut And Run anaLysIS pipeLinE

This snakemake pipeline is built to run on Biowulf.

For comments/suggestions/advice please contact CCBR_Pipeliner@mail.nih.gov.

For detailed documentation on running the pipeline view the documentation website.

"},{"location":"#workflow","title":"Workflow","text":"

The CARLISLE pipeline was developed in support of NIH Dr Vassiliki Saloura's Laboratory and Dr Javed Khan's Laboratory. It has been developed and tested solely on NIH HPC Biowulf.

"},{"location":"changelog/","title":"Changelog","text":""},{"location":"changelog/#carlisle-260","title":"CARLISLE 2.6.0","text":""},{"location":"changelog/#bug-fixes","title":"Bug fixes","text":"
  • Bug fixes for DESeq (#127, @epehrsson)
  • Removes single-sample group check for DESeq.
  • Increases memory for DESeq.
  • Ensures control replicate number is an integer.
  • Fixes FDR cutoff misassigned to log2FC cutoff.
  • Fixes no_dedup variable names in library normalization scripts.
  • Fix bug that added nonexistent directories to the singularity bind paths. (#135, @kelly-sovacool)
  • Containerize rules that require R (deseq, go_enrichment, and spikein_assessment) to fix installation issues with common R library path. (#129, @kelly-sovacool)
    • The Rlib_dir and Rpkg_config config options have been removed as they are no longer needed.
"},{"location":"changelog/#new-features","title":"New features","text":"
  • New visualizations: (#132, @epehrsson)
  • New rules cov_correlation, homer_enrich, combine_homer, count_peaks
  • Add peak caller to MACS2 peak xls filename
  • New parameters in the config file to make certain rules optional: (#133, @kelly-sovacool)
  • GO enrichment is controlled by run_go_enrichment (default: false)
  • ROSE is controlled by run_rose (default: false)
  • New --singcache argument to provide a singularity cache dir location. The singularity cache dir is automatically set inside /data/$USER/ or $WORKDIR/ if --singcache is not provided. (#143, @kelly-sovacool)
"},{"location":"changelog/#misc","title":"Misc","text":"
  • The singularity version is no longer specified, per request of the biowulf admins. (#139, @kelly-sovacool)
  • Minor documentation updates. (#146, @kelly-sovacool)
"},{"location":"changelog/#carlisle-v250","title":"CARLISLE v2.5.0","text":"
  • Refactors R packages to a common source location (#118, @slsevilla)
  • Adds a --force flag to allow for re-initialization of a workdir (#97, @slsevilla)
  • Fixes error with testrun in DESEQ2 (#113, @slsevilla)
  • Decreases the number of samples being run with testrun, essentially running tinytest as default and removing tinytest as an option (#115, @slsevilla)
  • Reads version from VERSION file instead of github repo link (#96, #112, @slsevilla)
  • Added a CHANGELOG (#116, @slsevilla)
  • Fix: RNA report bug, caused by hard-coding of PC1-3, when only PC1-2 were generated (#104, @slsevilla)
  • Minor documentation improvements. (#100, @kelly-sovacool)
  • Fix: allow printing the version or help message even if singularity is not in the path. (#110, @kelly-sovacool)
"},{"location":"changelog/#carlisle-v241","title":"CARLISLE v2.4.1","text":"
  • Add GitHub Action to add issues/PRs to personal project boards by @kelly-sovacool in #95
  • Create install script by @kelly-sovacool in #93
  • feat: use summits bed for homer input; save temporary files; fix deseq2 bug by @slsevilla in #108
  • docs: adding citation and DOI to pipeline by @slsevilla in #107
  • Test a dryrun with GitHub Actions by @kelly-sovacool in #94
"},{"location":"changelog/#carlisle-v240","title":"CARLISLE v2.4.0","text":"
  • Feature- Merged Yue's fork, adding DEEPTOOLS by @slsevilla in #85
  • Feature- Added tracking features from SPOOK by @slsevilla in #88
  • Feature - Dev test run completed by @slsevilla in #89
  • Bug - Fixed bugs related to Biowulf transition
"},{"location":"changelog/#carlisle-v210","title":"CARLISLE v2.1.0","text":"
  • enhancement
  • update gopeaks resources
  • change SEACR to run \"norm\" without spikein controls, \"non\" with spikein controls
  • update docs for changes; provide extra troubleshooting guidance
  • fix GoEnrich bug for failed plots
"},{"location":"changelog/#carlisle-v201","title":"CARLISLE v2.0.1","text":"
  • fix error when contrasts set to \"N\"
  • adjust goenrich resources to be more efficient
"},{"location":"changelog/#carlisle-200","title":"CARLISLE 2.0.0","text":"
  • Add a MAPQ filter to samtools (rule align)
  • Add GoPeaks MultiQC module
  • Allow for library normalization to occur during first pass
  • Add --broad-cutoff to MACS2 broad peak calling for MACS2
  • Create a spike in QC report
  • Reorganize file structure to help with qthreshold folder
  • Update variable names of all peak callers
  • Merge rules with input/output/wildcard congruency
  • Convert the \"spiked\" variable to \"norm_method\"
  • Add name of control used to MACS2 peaks
  • Running extra control:sample comparisons that are not needed
  • improved resource allocation
  • test data originally included 1475 jobs, this version includes 1087 jobs (reduction of 25%) despite including additional features
  • moved ~12% of all jobs to local deployment (within SLURM submission)
"},{"location":"changelog/#carlisle-120","title":"CARLISLE 1.2.0","text":"
  • merge increases to resources; update workflow img, contributions
"},{"location":"changelog/#carlisle-111","title":"CARLISLE 1.1.1","text":"
  • patch for gz bigbed bug
"},{"location":"changelog/#carlisle-110","title":"CARLISLE 1.1.0","text":"
  • add broad-cutoff to macs2 broad peaks param settings
  • add non.stringent and non.relaxed to annotation options
  • merge DESEQ and DESEQ2 rules together
  • identify some files as temp
"},{"location":"changelog/#carlisle-101","title":"CARLISLE 1.0.1","text":"
  • contains patch for DESEQ error with non hs1 reference samples
"},{"location":"contributing/","title":"Contributing to CARLISLE","text":""},{"location":"contributing/#proposing-changes-with-issues","title":"Proposing changes with issues","text":"

If you want to make a change, it's a good idea to first open an issue and make sure someone from the team agrees that it\u2019s needed.

If you've decided to work on an issue, assign yourself to the issue so others will know you're working on it.

"},{"location":"contributing/#pull-request-process","title":"Pull request process","text":"

We use GitHub Flow as our collaboration process. Follow the steps below for detailed instructions on contributing changes to CARLISLE.

"},{"location":"contributing/#clone-the-repo","title":"Clone the repo","text":"

If you are a member of CCBR, you can clone this repository to your computer or development environment. Otherwise, you will first need to fork the repo and clone your fork. You only need to do this step once.

git clone https://github.com/CCBR/CARLISLE\n

Cloning into 'CARLISLE'... remote: Enumerating objects: 1136, done. remote: Counting objects: 100% (463/463), done. remote: Compressing objects: 100% (357/357), done. remote: Total 1136 (delta 149), reused 332 (delta 103), pack-reused 673 Receiving objects: 100% (1136/1136), 11.01 MiB | 9.76 MiB/s, done. Resolving deltas: 100% (530/530), done.

cd CARLISLE\n
"},{"location":"contributing/#if-this-is-your-first-time-cloning-the-repo-you-may-need-to-install-dependencies","title":"If this is your first time cloning the repo, you may need to install dependencies","text":"
  • Install snakemake and singularity or docker if needed (biowulf already has these available as modules).

  • Install the python dependencies with pip

pip install .\n

If you're developing on biowulf, you can use our shared conda environment which already has these dependencies installed

. \"/data/CCBR_Pipeliner/db/PipeDB/Conda/etc/profile.d/conda.sh\"\nconda activate py311\n
  • Install pre-commit if you don't already have it. Then from the repo's root directory, run
pre-commit install\n

This will install the repo's pre-commit hooks. You'll only need to do this step the first time you clone the repo.

"},{"location":"contributing/#create-a-branch","title":"Create a branch","text":"

Create a Git branch for your pull request (PR). Give the branch a descriptive name for the changes you will make, such as iss-10 if it is for a specific issue.

# create a new branch and switch to it\ngit branch iss-10\ngit switch iss-10\n

Switched to a new branch 'iss-10'

"},{"location":"contributing/#make-your-changes","title":"Make your changes","text":"

Edit the code, write and run tests, and update the documentation as needed.

"},{"location":"contributing/#test","title":"test","text":"

Changes to the python package code will also need unit tests to demonstrate that the changes work as intended. We write unit tests with pytest and store them in the tests/ subdirectory. Run the tests with python -m pytest.

If you change the workflow, please run the workflow with the test profile and make sure your new feature or bug fix works as intended.

"},{"location":"contributing/#document","title":"document","text":"

If you have added a new feature or changed the API of an existing feature, you will likely need to update the documentation in docs/.

"},{"location":"contributing/#commit-and-push-your-changes","title":"Commit and push your changes","text":"

If you're not sure how often you should commit or what your commits should consist of, we recommend following the \"atomic commits\" principle where each commit contains one new feature, fix, or task. Learn more about atomic commits here: https://www.freshconsulting.com/insights/blog/atomic-commits/

First, add the files that you changed to the staging area:

git add path/to/changed/files/\n

Then make the commit. Your commit message should follow the Conventional Commits specification. Briefly, each commit should start with one of the approved types such as feat, fix, docs, etc. followed by a description of the commit. Take a look at the Conventional Commits specification for more detailed information about how to write commit messages.

git commit -m 'feat: create function for awesome feature'\n

pre-commit will enforce that your commit message and the code changes are styled correctly and will attempt to make corrections if needed.

Check for added large files..............................................Passed Fix End of Files.........................................................Passed Trim Trailing Whitespace.................................................Failed

  • hook id: trailing-whitespace
  • exit code: 1
  • files were modified by this hook > Fixing path/to/changed/files/file.txt > codespell................................................................Passed style-files..........................................(no files to check)Skipped readme-rmd-rendered..................................(no files to check)Skipped use-tidy-description.................................(no files to check)Skipped

In the example above, one of the hooks modified a file in the proposed commit, so the pre-commit check failed. You can run git diff to see the changes that pre-commit made and git status to see which files were modified. To proceed with the commit, re-add the modified file(s) and re-run the commit command:

git add path/to/changed/files/file.txt\ngit commit -m 'feat: create function for awesome feature'\n

This time, all the hooks either passed or were skipped (e.g. hooks that only run on R code will not run if no R files were committed). When the pre-commit check is successful, the usual commit success message will appear after the pre-commit messages showing that the commit was created.

Check for added large files..............................................Passed Fix End of Files.........................................................Passed Trim Trailing Whitespace.................................................Passed codespell................................................................Passed style-files..........................................(no files to check)Skipped readme-rmd-rendered..................................(no files to check)Skipped use-tidy-description.................................(no files to check)Skipped Conventional Commit......................................................Passed > [iss-10 9ff256e] feat: create function for awesome feature 1 file changed, 22 insertions(+), 3 deletions(-)

Finally, push your changes to GitHub:

git push\n

If this is the first time you are pushing this branch, you may have to explicitly set the upstream branch:

git push --set-upstream origin iss-10\n

Enumerating objects: 7, done. Counting objects: 100% (7/7), done. Delta compression using up to 10 threads Compressing objects: 100% (4/4), done. Writing objects: 100% (4/4), 648 bytes | 648.00 KiB/s, done. Total 4 (delta 3), reused 0 (delta 0), pack-reused 0 remote: Resolving deltas: 100% (3/3), completed with 3 local objects. remote: remote: Create a pull request for 'iss-10' on GitHub by visiting: remote: https://github.com/CCBR/CARLISLE/pull/new/iss-10 remote: To https://github.com/CCBR/CARLISLE > > [new branch] iss-10 -> iss-10 branch 'iss-10' set up to track 'origin/iss-10'.

We recommend pushing your commits often so they will be backed up on GitHub. You can view the files in your branch on GitHub at https://github.com/CCBR/CARLISLE/tree/<your-branch-name> (replace <your-branch-name> with the actual name of your branch).

"},{"location":"contributing/#create-the-pr","title":"Create the PR","text":"

Once your branch is ready, create a PR on GitHub: https://github.com/CCBR/CARLISLE/pull/new/

Select the branch you just pushed:

Edit the PR title and description. The title should briefly describe the change. Follow the comments in the template to fill out the body of the PR, and you can delete the comments (everything between <!-- and -->) as you go. Be sure to fill out the checklist, checking off items as you complete them or striking through any irrelevant items. When you're ready, click 'Create pull request' to open it.

Optionally, you can mark the PR as a draft if you're not yet ready for it to be reviewed, then change it later when you're ready.

"},{"location":"contributing/#wait-for-a-maintainer-to-review-your-pr","title":"Wait for a maintainer to review your PR","text":"

We will do our best to follow the tidyverse code review principles: https://code-review.tidyverse.org/. The reviewer may suggest that you make changes before accepting your PR in order to improve the code quality or style. If that's the case, continue to make changes in your branch and push them to GitHub, and they will appear in the PR.

Once the PR is approved, the maintainer will merge it and the issue(s) the PR links will close automatically. Congratulations and thank you for your contribution!

"},{"location":"contributing/#after-your-pr-has-been-merged","title":"After your PR has been merged","text":"

After your PR has been merged, update your local clone of the repo by switching to the main branch and pulling the latest changes:

git checkout main\ngit pull\n

It's a good idea to run git pull before creating a new branch so it will start from the most recent commits in main.

"},{"location":"contributing/#helpful-links-for-more-information","title":"Helpful links for more information","text":"
  • GitHub Flow
  • semantic versioning guidelines
  • changelog guidelines
  • tidyverse code review principles
  • reproducible examples
  • nf-core extensions for VS Code
"},{"location":"user-guide/contributions/","title":"Contributions","text":"

The following members contributed to the development of the CARLISLE pipeline:

  • Samantha Sevilla
  • Vishal Koparde
  • Hsien-chao Chou
  • Sohyoung Kim
  • Yue Hu
  • Vassiliki Saloura

VK, SS, SK, HC contributed to the generating the source code and all members contributed to the main concepts and analysis.

"},{"location":"user-guide/getting-started/","title":"Overview","text":"

The CARLISLE github repository is stored locally, and will be used for project deployment. Multiple projects can be deployed from this one point simultaneously, without concern.

"},{"location":"user-guide/getting-started/#1-getting-started","title":"1. Getting Started","text":""},{"location":"user-guide/getting-started/#11-introduction","title":"1.1 Introduction","text":"

The CARLISLE pipeline begins with raw FASTQ files and performs trimming followed by alignment using BOWTIE2. Data is then normalized either through the use of a user-specified species (e.g. E. coli) spike-in control or through the determined library size. Peaks are then called using MACS2, SEACR, and GoPEAKS with various options selected by the user. Peaks are then annotated and summarized into reports. If designated, differential analysis is performed using DESEQ2. QC reports are also generated with each project using FASTQC and MULTIQC. Annotations are added using HOMER and ROSE. GSEA enrichment analysis predictions are added using CHIPENRICH.

The following are sub-commands used within CARLISLE:

  • initialize: initialize the pipeline
  • dryrun: perform a dry-run of the pipeline
  • cluster: execute the pipeline on the Biowulf HPC
  • local: execute a local, interactive, session
  • git: execute GitHub actions
  • unlock: unlock directory
  • DAG: create DAG report
  • report: create SNAKEMAKE report
  • runtest: copies test manifests and files to WORKDIR
"},{"location":"user-guide/getting-started/#12-setup-dependencies","title":"1.2 Setup Dependencies","text":"

CARLISLE has several dependencies listed below. These dependencies can be installed by a sysadmin. All dependencies will be automatically loaded if running from Biowulf.

  • bedtools: \"bedtools/2.30.0\"
  • bedops: \"bedops/2.4.40\"
  • bowtie2: \"bowtie/2-2.4.2\"
  • cutadapt: \"cutadapt/1.18\"
  • fastqc: \"fastqc/0.11.9\"
  • fastq_screen: \"fastq_screen/0.15.2\"
  • fastq_val: \"/data/CCBR_Pipeliner/iCLIP/bin/fastQValidator\"
  • fastxtoolkit: \"fastxtoolkit/0.0.14\"
  • gopeaks: \"github clone https://github.com/maxsonBraunLab/gopeaks\"
  • macs2: \"macs/2.2.7.1\"
  • multiqc: \"multiqc/1.9\"
  • perl: \"perl/5.34.0\"
  • picard: \"picard/2.26.9\"
  • python37: \"python/3.7\"
  • R: \"R/4.2.2\"
  • rose: \"ROSE/1.3.1\"
  • samtools: \"samtools/1.15\"
  • seacr: \"seacr/1.4-beta.2\"
  • ucsc: \"ucsc/407\"
"},{"location":"user-guide/getting-started/#13-login-to-the-cluster","title":"1.3 Login to the cluster","text":"

CARLISLE has been exclusively tested on Biowulf HPC. Login to the cluster's head node and move into the pipeline location.

# ssh into cluster's head node\nssh -Y $USER@biowulf.nih.gov\n
"},{"location":"user-guide/getting-started/#14-load-an-interactive-session","title":"1.4 Load an interactive session","text":"

An interactive session should be started before performing any of the pipeline sub-commands, even if the pipeline is to be executed on the cluster.

# Grab an interactive node\nsinteractive --time=12:00:00 --mem=8gb  --cpus-per-task=4 --pty bash\n
"},{"location":"user-guide/output/","title":"4. Expected Outputs","text":"

The following directories are created under the WORKDIR/results directory:

  • alignment_stats: this directory includes information on the alignment of each sample
  • bam: this directory includes BAM files, statistics on samples, statistics on spike-in controls for each sample
  • bedgraph: this directory includes BEDGRAPH files and statistic summaries for each sample
  • bigwig: this directory includes the bigwig files for each sample
  • peaks: this directory contains a sub-directory that relates to the quality threshold used.
  • quality threshold
    • contrasts: this directory includes the contrasts for each line listed in the contrast manifest
    • peak_caller: this directory includes all peak calls from each peak_caller (SEACR, MACS2, GOPEAKS) for each sample
    • annotation
      • go_enrichment: this directory includes gene set enrichment pathway predictions when run_go_enrichment is set to true in the config file.
      • homer: this directory includes the annotation output from HOMER
      • rose: this directory includes the annotation output from ROSE when run_rose is set to true in the config file.
  • qc: this directory includes MULTIQC reports and spike-in control reports (when applicable)
\u251c\u2500\u2500 alignment_stats\n\u251c\u2500\u2500 bam\n\u251c\u2500\u2500 bedgraph\n\u251c\u2500\u2500 bigwig\n\u251c\u2500\u2500 fragments\n\u251c\u2500\u2500 peaks\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 0.05\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 contrasts\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 contrast_id1.dedup_status\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 contrast_id2.dedup_status\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 gopeaks\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 annotation\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 go_enrichment\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 contrast_id1.dedup_status.go_enrichment_tables\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 contrast_id2.dedup_status.go_enrichment_html_report\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 homer\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 replicate_id1_vs_control_id.dedup_status.gopeaks_broad.motifs\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 homerResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 knownResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 replicate_id1_vs_control_id.dedup_status.gopeaks_narrow.motifs\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 homerResults\n\u2502\u00a0\u00a0 
\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 knownResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 replicate_id2_vs_control_id.dedup_status.gopeaks_broad.motifs\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 homerResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 knownResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 replicate_id2_vs_control_id.dedup_status.gopeaks_narrow.motifs\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 homerResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 knownResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 rose\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0     \u251c\u2500\u2500 replicate_id1_vs_control_id.dedup_status.gopeaks_broad.12500\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0     \u251c\u2500\u2500 replicate_id1_vs_control_id.dedup_status.gopeaks_narrow.12500\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0     \u251c\u2500\u2500 replicate_id2_vs_control_id.dedup_status.dedup.gopeaks_broad.12500\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0     \u251c\u2500\u2500 replicate_id2_vs_control_id.dedup_status.dedup.gopeaks_narrow.12500\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 peak_output\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 
macs2\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 annotation\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 go_enrichment\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 contrast_id1.dedup_status.go_enrichment_tables\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 contrast_id2.dedup_status.go_enrichment_html_report\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 homer\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 replicate_id1_vs_control_id.dedup_status.macs2_narrow.motifs\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 homerResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 knownResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 replicate_id1_vs_control_id.dedup_status.macs2_broad.motifs\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 homerResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 knownResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 replicate_id2_vs_control_id.dedup_status.macs2_narrow.motifs\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 homerResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 
\u2502\u00a0\u00a0 \u2514\u2500\u2500 knownResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 replicate_id2_vs_control_id.dedup_status.macs2_broad.motifs\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 homerResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 knownResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 rose\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0     \u251c\u2500\u2500 replicate_id1_vs_control_id.dedup_status.macs2_broad.12500\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0     \u251c\u2500\u2500 replicate_id1_vs_control_id.dedup_status.macs2_narrow.12500\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0     \u251c\u2500\u2500 replicate_id2_vs_control_id.dedup_status.macs2_broad.12500\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0     \u251c\u2500\u2500 replicate_id2_vs_control_id.dedup_status.macs2_narrow.12500\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 peak_output\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 seacr\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 annotation\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 go_enrichment\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 contrast_id1.dedup_status.go_enrichment_tables\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 contrast_id2.dedup_status.go_enrichment_html_report\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 
\u2502\u00a0\u00a0 \u251c\u2500\u2500 homer\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 replicate_id1_vs_control_id.dedup_status.seacr_non_relaxed.motifs\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 homerResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 knownResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 replicate_id1_vs_control_id.dedup_status.seacr_non_stringent.motifs\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 homerResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 knownResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 replicate_id1_vs_control_id.dedup_status.seacr_norm_relaxed.motifs\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 homerResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 knownResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 replicate_id1_vs_control_id.dedup_status.seacr_norm_stringent.motifs\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 homerResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 knownResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 
\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 replicate_id2_vs_control_id.dedup_status.seacr_non_relaxed.motifs\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 homerResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 knownResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 replicate_id2_vs_control_id.dedup_status.seacr_non_stringent.motifs\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 homerResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 knownResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 replicate_id2_vs_control_id.dedup_status.seacr_norm_relaxed.motifs\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 homerResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 knownResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 replicate_id2_vs_control_id.dedup_status.seacr_norm_stringent.motifs\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 homerResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 knownResults\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 rose\n\u2502\u00a0\u00a0 
\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0     \u251c\u2500\u2500 replicate_id1_vs_control_id.dedup_status.seacr_non_relaxed.12500\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0     \u251c\u2500\u2500 replicate_id1_vs_control_id.dedup_status.seacr_non_stringent.12500\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0     \u251c\u2500\u2500 replicate_id1_vs_control_id.dedup_status.seacr_norm_relaxed.12500\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0     \u251c\u2500\u2500 replicate_id1_vs_control_id.dedup_status.seacr_norm_stringent.12500\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0     \u251c\u2500\u2500 replicate_id2_vs_control_id.dedup_status.seacr_non_relaxed.12500\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0     \u251c\u2500\u2500 replicate_id2_vs_control_id.dedup_status.seacr_non_stringent.12500\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0     \u251c\u2500\u2500 replicate_id2_vs_control_id.dedup_status.seacr_norm_relaxed.12500\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0     \u251c\u2500\u2500 replicate_id2_vs_control_id.dedup_status.seacr_norm_stringent.12500\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0     \u2514\u2500\u2500 peak_output\n\u2514\u2500\u2500 qc\n    \u251c\u2500\u2500 fastqc_raw\n    \u2514\u2500\u2500 fqscreen_raw\n
"},{"location":"user-guide/preparing-files/","title":"2. Preparing Files","text":"

The pipeline is controlled through editing configuration and manifest files. Defaults are found in the /WORKDIR/config and /WORKDIR/manifest directories, after initialization.

"},{"location":"user-guide/preparing-files/#21-configs","title":"2.1 Configs","text":"

The configuration files control parameters and software of the pipeline. These files are listed below:

  • config/config.yaml
  • resources/cluster.yaml
  • resources/tools.yaml
"},{"location":"user-guide/preparing-files/#211-cluster-config","title":"2.1.1 Cluster Config","text":"

The cluster configuration file dictates the resources to be used during submission to Biowulf HPC. There are two different ways to control these parameters - first, to control the default settings, and second, to create or edit individual rules. These parameters should be edited with caution, after significant testing.

"},{"location":"user-guide/preparing-files/#212-tools-config","title":"2.1.2 Tools Config","text":"

The tools configuration file dictates the version of each software or program that is being used in the pipeline.

"},{"location":"user-guide/preparing-files/#213-config-yaml","title":"2.1.3 Config YAML","text":"

There are several groups of parameters that are editable for the user to control the various aspects of the pipeline. These are :

  • Folders and Paths
  • These parameters will include the input and output files of the pipeline, as well as list all manifest names.
  • User parameters
  • These parameters will control the pipeline features. These include thresholds and whether to perform processes.
  • References
  • These parameters will control the location of index files, spike-in references, adaptors and species calling information.
"},{"location":"user-guide/preparing-files/#2131-user-parameters","title":"2.1.3.1 User Parameters","text":""},{"location":"user-guide/preparing-files/#21311-spike-in-controls","title":"2.1.3.1.1 (Spike in Controls)","text":"

The pipeline allows for the use of a species specific spike-in control, or the use of normalization via library size. The parameter spikein_genome should be set to the species term used in spikein_reference.

For example for ecoli spike-in:

run_contrasts: true\nnorm_method: \"spikein\"\nspikein_genome: \"ecoli\"\nspikein_reference:\n  ecoli:\n    fa: \"PIPELINE_HOME/resources/spikein/Ecoli_GCF_000005845.2_ASM584v2_genomic.fna\"\n

For example for drosophila spike-in:

run_contrasts: true\nnorm_method: \"spikein\"\nspikein_genome: \"drosophila\"\nspikein_reference:\n  drosophila:\n    fa: \"/fdb/igenomes/Drosophila_melanogaster/UCSC/dm6/Sequence/WholeGenomeFasta/genome.fa\"\n

If it's determined that the amount of spike-in is not sufficient for the run, a library normalization can be performed.

  1. Complete a CARLISLE run with spike-in set to \"Y\". This will allow for the complete assessment of the spike-in.
  2. Run initial QC analysis on the output data
  3. Add the alignment_stats dir to the configuration file.
  4. Re-run the CARLISLE pipeline
"},{"location":"user-guide/preparing-files/#21312-duplication-status","title":"2.1.3.1.2 Duplication Status","text":"

Users can select duplicated peaks (dedup) or non-deduplicated peaks (no_dedup) through the user parameter.

dupstatus: \"dedup, no_dedup\"\n
"},{"location":"user-guide/preparing-files/#21313-peak-caller","title":"2.1.3.1.3 Peak Caller","text":"

Three peak callers are available for deployment within the pipeline, with different settings deployed for each caller.

  1. MACS2 is available with two peak calling options: narrowPeak or broadPeak. NOTE: DESeq step generally fails for broadPeak; generally has too many calls.
peaktype: \"macs2_narrow, macs2_broad,\"\n
  1. SEACR is available with four peak calling options: stringent or relaxed parameters, to be paired with \"norm\" for samples without a spike-in control and \"non\" for samples with a spikein control
peaktype: \"seacr_stringent, seacr_relaxed\"\n
  1. GOPEAKS is available with two peak calling options: narrowpeaks or broadpeaks
peaktype: \"gopeaks_narrow, gopeaks_broad\"\n

A complete list of the available peak calling parameters and the recommended list of parameters is provided below:

Peak Caller Narrow Broad Normalized, Stringent Normalized, Relaxed Non-Normalized, Stringent Non-Normalized, Relaxed Macs2 AVAIL AVAIL NA NA NA NA SEACR NA NA AVAIL w/o SPIKEIN AVAIL w/o SPIKEIN AVAIL w/ SPIKEIN AVAIL w/ SPIKEIN GoPeaks AVAIL AVAIL NA NA NA NA
# Recommended list\n### peaktype: \"macs2_narrow, macs2_broad, gopeaks_narrow, gopeaks_broad\"\n\n# Available list\n### peaktype: \"macs2_narrow, macs2_broad, seacr_norm_stringent, seacr_norm_relaxed, seacr_non_stringent, seacr_non_relaxed, gopeaks_narrow, gopeaks_broad\"\n
"},{"location":"user-guide/preparing-files/#213131-macs2-additional-option","title":"2.1.3.1.3.1 Macs2 additional option","text":"

MACS2 can be run with or without the control. Adding a control will increase peak specificity. Selecting \"Y\" for the macs2_control will run the paired control sample provided in the sample manifest

"},{"location":"user-guide/preparing-files/#21314-quality-tresholds","title":"2.1.3.1.4 Quality Tresholds","text":"

Thresholds for quality can be controlled through the quality_thresholds parameter. This must be a list of comma separated values. A minimum of one numeric value is required.

  • default MACS2 qvalue is 0.05 https://manpages.ubuntu.com/manpages/xenial/man1/macs2_callpeak.1.html
  • default GOPEAKS pvalue is 0.05 https://github.com/maxsonBraunLab/gopeaks/blob/main/README.md
  • default SEACR FDR threshold 1 https://github.com/FredHutch/SEACR/blob/master/README.md
#default values\nquality_thresholds: \"0.1, 0.05, 0.01\"\n
"},{"location":"user-guide/preparing-files/#2132-references","title":"2.1.3.2 References","text":"

Additional reference files may be added to the pipeline, if other species were to be used.

The absolute file paths which must be included are:

  1. fa: \"/path/to/species.fa\"
  2. blacklist: \"/path/to/blacklistbed/species.bed\"

The following information must be included:

  1. regions: \"list of regions to be included; IE chr1 chr2 chr3\"
  2. macs2_g: \"macs2 genome shorthand; IE mm IE hs\"
"},{"location":"user-guide/preparing-files/#22-preparing-manifests","title":"2.2 Preparing Manifests","text":"

There are two manifests, one which is required for all pipelines and one that is only required if running a differential analysis. These files describe information on the samples and desired contrasts. The paths of these files are defined in the snakemake_config.yaml file. These files are:

  • samplemanifest
  • contrasts
"},{"location":"user-guide/preparing-files/#221-samples-manifest-required","title":"2.2.1 Samples Manifest (REQUIRED)","text":"

This manifest will include sample level information. It includes the following column headers:

  • sampleName: the sample name WITHOUT replicate number (IE \"SAMPLE\")
  • replicateNumber: the sample replicate number (IE \"1\")
  • isControl: whether the sample should be identified as a control (IE \"Y\")
  • controlName: the name of the control to use for this sample (IE \"CONTROL\")
  • controlReplicateNumber: the replicate number of the control to use for this sample (IE \"1\")
  • path_to_R1: the full path to R1 fastq file (IE \"/path/to/sample1.R1.fastq\")
  • path_to_R2: the full path to R2 fastq file (IE \"/path/to/sample2.R2.fastq\")

An example sampleManifest file is shown below:

sampleName replicateNumber isControl controlName controlReplicateNumber path_to_R1 path_to_R2 53_H3K4me3 1 N HN6_IgG_rabbit_negative_control 1 PIPELINE_HOME/.test/53_H3K4me3_1.R1.fastq.gz PIPELINE_HOME/.test/53_H3K4me3_1.R2.fastq.gz 53_H3K4me3 2 N HN6_IgG_rabbit_negative_control 1 PIPELINE_HOME/.test/53_H3K4me3_2.R1.fastq.gz PIPELINE_HOME/.test/53_H3K4me3_2.R2.fastq.gz HN6_H3K4me3 1 N HN6_IgG_rabbit_negative_control 1 PIPELINE_HOME/.test/HN6_H3K4me3_1.R1.fastq.gz PIPELINE_HOME/.test/HN6_H3K4me3_1.R2.fastq.gz HN6_H3K4me3 2 N HN6_IgG_rabbit_negative_control 1 PIPELINE_HOME/.test/HN6_H3K4me3_2.R1.fastq.gz PIPELINE_HOME/.test/HN6_H3K4me3_2.R2.fastq.gz HN6_IgG_rabbit_negative_control 1 Y - - PIPELINE_HOME/.test/HN6_IgG_rabbit_negative_control_1.R1.fastq.gz PIPELINE_HOME/.test/HN6_IgG_rabbit_negative_control_1.R2.fastq.gz"},{"location":"user-guide/preparing-files/#222-contrast-manifest-optional","title":"2.2.2 Contrast Manifest (OPTIONAL)","text":"

This manifest will include sample information needed to perform differential comparisons.

An example contrast file:

condition1 condition2 MOC1_siSmyd3_2m_25_HCHO MOC1_siNC_2m_25_HCHO

Note: you must have more than one sample per condition in order to perform differential analysis with DESeq2

"},{"location":"user-guide/run/","title":"3. Running the Pipeline","text":""},{"location":"user-guide/run/#31-pipeline-overview","title":"3.1 Pipeline Overview","text":"

The Snakemake workflow has multiple options

"},{"location":"user-guide/run/#required-arguments","title":"Required arguments","text":"
Usage: bash ./data/CCBR_Pipeliner/Pipelines/CARLISLE/carlisle -m/--runmode=<RUNMODE> -w/--workdir=<WORKDIR>\n\n1.  RUNMODE: [Type: String] Valid options:\n    *) init : initialize workdir\n    *) run : run with slurm\n    *) reset : DELETE workdir dir and re-init it\n    *) dryrun : dry run snakemake to generate DAG\n    *) unlock : unlock workdir if locked by snakemake\n    *) runlocal : run without submitting to sbatch\n    *) runtest: run on cluster with included test dataset\n2.  WORKDIR: [Type: String]: Absolute or relative path to the output folder with write permissions.\n
"},{"location":"user-guide/run/#optional-arguments","title":"Optional arguments","text":"

--help|-h : print this help. --version|-v : print the version of carlisle. --force|-f : use the force flag for snakemake to force all rules to run. --singcache|-c : singularity cache directory. Default is /data/${USER}/.singularity if available, or falls back to ${WORKDIR}/.singularity. Use this flag to specify a different singularity cache directory.

"},{"location":"user-guide/run/#32-commands-explained","title":"3.2 Commands explained","text":"

The following explains each of the command options:

  • Preparation Commands
  • init (REQUIRED): This must be performed before any Snakemake run (dry, local, cluster) can be performed. This will copy the necessary config, manifest and Snakefiles needed to run the pipeline to the provided output directory.
    • the -f/--force flag can be used in order to re-initialize a workdir that has already been created
  • dryrun (OPTIONAL): This is an optional step, to be performed before any Snakemake run (local, cluster). This will check for errors within the pipeline, and ensure that you have read/write access to the files needed to run the full pipeline.
  • Processing Commands
  • local: This will run the pipeline on a local node. NOTE: This should only be performed on an interactive node.
  • run: This will submit a master job to the cluster, and subsequent sub-jobs as needed to complete the workflow. An email will be sent when the pipeline begins, if there are any errors, and when it completes.
  • Other Commands (All optional)
  • unlock: This will unlock the pipeline if an error caused it to stop in the middle of a run.
  • runtest: This will run a test of the pipeline with test data

To run any of these commands, follow the syntax:

bash ./data/CCBR_Pipeliner/Pipelines/CARLISLE/carlisle --runmode=COMMAND --workdir=/path/to/output/dir\n
"},{"location":"user-guide/run/#33-typical-workflow","title":"3.3 Typical Workflow","text":"

A typical command workflow, running on the cluster, is as follows:

bash ./data/CCBR_Pipeliner/Pipelines/CARLISLE/carlisle --runmode=init --workdir=/path/to/output/dir\n\nbash ./data/CCBR_Pipeliner/Pipelines/CARLISLE/carlisle --runmode=dryrun --workdir=/path/to/output/dir\n\nbash ./data/CCBR_Pipeliner/Pipelines/CARLISLE/carlisle --runmode=run --workdir=/path/to/output/dir\n
"},{"location":"user-guide/test-info/","title":"5. Pipeline Tutorial","text":"

Welcome to the CARLISLE Pipeline Tutorial!

"},{"location":"user-guide/test-info/#51-getting-started","title":"5.1 Getting Started","text":"

Review the information on the Getting Started page for a complete overview of the pipeline. The tutorial below will use test data available on NIH Biowulf HPC only. All example code will assume you are running v1.0 of the pipeline, using test data available on GitHub.

A. Change working directory to the CARLISLE repository

B. Initialize Pipeline

bash ./path/to/dir/carlisle --runmode=init --workdir=/path/to/output/dir\n
"},{"location":"user-guide/test-info/#52-submit-the-test-data","title":"5.2 Submit the test data","text":"

Test data is included in the .test directory as well as the config directory.

A Run the test command to prepare the data, perform a dry-run and submit to the cluster

bash ./path/to/dir/carlisle --runmode=runtest --workdir=/path/to/output/dir\n
  • An expected output for the runtest is as follows:
Job stats:\njob                              count    min threads    max threads\n-----------------------------  -------  -------------  -------------\nDESeq                                  24              1              1\nalign                                   9             56             56\nalignstats                              9              2              2\nall                                     1              1              1\nbam2bg                                  9              4              4\ncreate_contrast_data_files             24              1              1\ncreate_contrast_peakcaller_files       12              1              1\ncreate_reference                        1             32             32\ncreate_replicate_sample_table           1              1              1\ndiffbb                                 24              1              1\nfilter                                 18              2              2\nfindMotif                              96              6              6\ngather_alignstats                       1              1              1\ngo_enrichment                          12              1              1\ngopeaks_broad                          16              2              2\ngopeaks_narrow                         16              2              2\nmacs2_broad                            16              2              2\nmacs2_narrow                           16              2              2\nmake_counts_matrix                     24              1              1\nmultiqc                                 2              1              1\nqc_fastqc                               9              1              1\nrose                                   96              2              2\nseacr_relaxed                          16              2              2\nseacr_stringent                        16              2              2\nspikein_assessment                      1              1              1\ntrim                   
                 9             56             56\ntotal                                 478              1             56\n
"},{"location":"user-guide/test-info/#53-review-outputs","title":"5.3 Review outputs","text":"

Review the expected outputs on the Output page. If there are errors, review and perform the steps described on the Troubleshooting page as needed.

"},{"location":"user-guide/troubleshooting/","title":"Troubleshooting","text":"

Recommended steps to troubleshoot the pipeline.

"},{"location":"user-guide/troubleshooting/#11-email","title":"1.1 Email","text":"

Check your email for an email regarding pipeline failure. You will receive an email from slurm@biowulf.nih.gov with the subject: Slurm Job_id=[#] Name=CARLISLE Failed, Run time [time], FAILED, ExitCode 1

"},{"location":"user-guide/troubleshooting/#12-review-the-log-files","title":"1.2 Review the log files","text":"

Review the logs in two ways:

  1. Review the master slurm file: This file will be found in the /path/to/results/dir/ and titled slurm-[jobid].out. Reviewing this file will tell you what rule errored, and for any local SLURM jobs, provide error details
  2. Review the individual rule log files: After reviewing the master slurm-file, review the specific rules that failed within the /path/to/results/dir/logs/. Each rule will include a .err and .out file, with the following formatting: {rulename}.{masterjobID}.{individualruleID}.{wildcards from the rule}.{out or err}
"},{"location":"user-guide/troubleshooting/#13-restart-the-run","title":"1.3 Restart the run","text":"

After addressing the issue, unlock the output directory, perform another dry-run and check the status of the pipeline, then resubmit to the cluster.

#unlock dir\nbash ./data/CCBR_Pipeliner/Pipelines/CARLISLE/carlisle --runmode=unlock --workdir=/path/to/output/dir\n\n#perform dry-run\nbash ./data/CCBR_Pipeliner/Pipelines/CARLISLE/carlisle --runmode=dryrun --workdir=/path/to/output/dir\n\n#submit to cluster\nbash ./data/CCBR_Pipeliner/Pipelines/CARLISLE/carlisle --runmode=run --workdir=/path/to/output/dir\n
"},{"location":"user-guide/troubleshooting/#14-contact-information","title":"1.4 Contact information","text":"

If after troubleshooting, the error cannot be resolved, or if a bug is found, please create an issue and send an email to Samantha Chill.

"}]} \ No newline at end of file diff --git a/2.6/sitemap.xml b/2.6/sitemap.xml new file mode 100644 index 0000000..0f8724e --- /dev/null +++ b/2.6/sitemap.xml @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/2.6/sitemap.xml.gz b/2.6/sitemap.xml.gz new file mode 100644 index 0000000..943d4a7 Binary files /dev/null and b/2.6/sitemap.xml.gz differ diff --git a/2.6/user-guide/contributions/index.html b/2.6/user-guide/contributions/index.html new file mode 100644 index 0000000..ed0af3a --- /dev/null +++ b/2.6/user-guide/contributions/index.html @@ -0,0 +1 @@ + Contributions - CARLISLE

Contributions

The following members contributed to the development of the CARLISLE pipeline:

VK, SS, SK, HC contributed to generating the source code and all members contributed to the main concepts and analysis.


Last update: 2024-07-16
\ No newline at end of file diff --git a/2.6/user-guide/getting-started/index.html b/2.6/user-guide/getting-started/index.html new file mode 100644 index 0000000..344b08e --- /dev/null +++ b/2.6/user-guide/getting-started/index.html @@ -0,0 +1,5 @@ + 1. Getting Started - CARLISLE

Overview

The CARLISLE github repository is stored locally, and will be used for project deployment. Multiple projects can be deployed from this one point simultaneously, without concern.

1. Getting Started

1.1 Introduction

The CARLISLE pipeline begins with raw FASTQ files and performs trimming followed by alignment using BOWTIE2. Data is then normalized through either the use of a user-specified species (IE E. coli) spike-in control or through the determined library size. Peaks are then called using MACS2, SEACR, and GoPEAKS with various options selected by the user. Peaks are then annotated, and summarized into reports. If designated, differential analysis is performed using DESEQ2. QC reports are also generated with each project using FASTQC and MULTIQC. Annotations are added using HOMER and ROSE. GSEA Enrichment analysis predictions are added using CHIPENRICH.

The following are sub-commands used within CARLISLE:

  • initialize: initialize the pipeline
  • dryrun: perform a dry-run of the pipeline to generate the DAG
  • cluster: execute the pipeline on the Biowulf HPC
  • local: execute a local, interactive, session
  • git: execute GitHub actions
  • unlock: unlock directory
  • DAG: create DAG report
  • report: create SNAKEMAKE report
  • runtest: copies test manifests and files to WORKDIR

1.2 Setup Dependencies

CARLISLE has several dependencies listed below. These dependencies can be installed by a sysadmin. All dependencies will be automatically loaded if running from Biowulf.

  • bedtools: "bedtools/2.30.0"
  • bedops: "bedops/2.4.40"
  • bowtie2: "bowtie/2-2.4.2"
  • cutadapt: "cutadapt/1.18"
  • fastqc: "fastqc/0.11.9"
  • fastq_screen: "fastq_screen/0.15.2"
  • fastq_val: "/data/CCBR_Pipeliner/iCLIP/bin/fastQValidator"
  • fastxtoolkit: "fastxtoolkit/0.0.14"
  • gopeaks: "github clone https://github.com/maxsonBraunLab/gopeaks"
  • macs2: "macs/2.2.7.1"
  • multiqc: "multiqc/1.9"
  • perl: "perl/5.34.0"
  • picard: "picard/2.26.9"
  • python37: "python/3.7"
  • R: "R/4.2.2"
  • rose: "ROSE/1.3.1"
  • samtools: "samtools/1.15"
  • seacr: "seacr/1.4-beta.2"
  • ucsc: "ucsc/407"

1.3 Login to the cluster

CARLISLE has been exclusively tested on Biowulf HPC. Login to the cluster's head node and move into the pipeline location.

# ssh into cluster's head node
+ssh -Y $USER@biowulf.nih.gov
+

1.4 Load an interactive session

An interactive session should be started before performing any of the pipeline sub-commands, even if the pipeline is to be executed on the cluster.

# Grab an interactive node
+sinteractive --time=12:00:00 --mem=8gb  --cpus-per-task=4 --pty bash
+

Last update: 2024-07-16
\ No newline at end of file diff --git a/2.6/user-guide/output/index.html b/2.6/user-guide/output/index.html new file mode 100644 index 0000000..9451182 --- /dev/null +++ b/2.6/user-guide/output/index.html @@ -0,0 +1,102 @@ + 4. Expected Output - CARLISLE

4. Expected Outputs

The following directories are created under the WORKDIR/results directory:

  • alignment_stats: this directory includes information on the alignment of each sample
  • bam: this directory includes BAM files, statistics on samples, statistics on spike-in controls for each sample
  • bedgraph: this directory includes BEDGRAPH files and statistic summaries for each sample
  • bigwig: this directory includes the bigwig files for each sample
  • peaks: this directory contains a sub-directory that relates to the quality threshold used.
  • quality threshold
    • contrasts: this directory includes the contrasts for each line listed in the contrast manifest
    • peak_caller: this directory includes all peak calls from each peak_caller (SEACR, MACS2, GOPEAKS) for each sample
    • annotation
      • go_enrichment: this directory includes gene set enrichment pathway predictions when run_go_enrichment is set to true in the config file.
      • homer: this directory includes the annotation output from HOMER
      • rose: this directory includes the annotation output from ROSE when run_rose is set to true in the config file.
  • qc: this directory includes MULTIQC reports and spike-in control reports (when applicable)
├── alignment_stats
+├── bam
+├── bedgraph
+├── bigwig
+├── fragments
+├── peaks
+│   ├── 0.05
+│   │   ├── contrasts
+│   │   │   ├── contrast_id1.dedup_status
+│   │   │   └── contrast_id2.dedup_status
+│   │   ├── gopeaks
+│   │   │   ├── annotation
+│   │   │   │   ├── go_enrichment
+│   │   │   │   │   ├── contrast_id1.dedup_status.go_enrichment_tables
+│   │   │   │   │   └── contrast_id2.dedup_status.go_enrichment_html_report
+│   │   │   │   ├── homer
+│   │   │   │   │   ├── replicate_id1_vs_control_id.dedup_status.gopeaks_broad.motifs
+│   │   │   │   │   │   ├── homerResults
+│   │   │   │   │   │   └── knownResults
+│   │   │   │   │   ├── replicate_id1_vs_control_id.dedup_status.gopeaks_narrow.motifs
+│   │   │   │   │   │   ├── homerResults
+│   │   │   │   │   │   └── knownResults
+│   │   │   │   │   ├── replicate_id2_vs_control_id.dedup_status.gopeaks_broad.motifs
+│   │   │   │   │   │   ├── homerResults
+│   │   │   │   │   │   └── knownResults
+│   │   │   │   │   ├── replicate_id2_vs_control_id.dedup_status.gopeaks_narrow.motifs
+│   │   │   │   │   │   ├── homerResults
+│   │   │   │   │   │   └── knownResults
+│   │   │   │   └── rose
+│   │   │   │       ├── replicate_id1_vs_control_id.dedup_status.gopeaks_broad.12500
+│   │   │   │       ├── replicate_id1_vs_control_id.dedup_status.gopeaks_narrow.12500
+│   │   │   │       ├── replicate_id2_vs_control_id.dedup_status.dedup.gopeaks_broad.12500
+│   │   │   │       ├── replicate_id2_vs_control_id.dedup_status.dedup.gopeaks_narrow.12500
+│   │   │   └── peak_output
+│   │   ├── macs2
+│   │   │   ├── annotation
+│   │   │   │   ├── go_enrichment
+│   │   │   │   │   ├── contrast_id1.dedup_status.go_enrichment_tables
+│   │   │   │   │   └── contrast_id2.dedup_status.go_enrichment_html_report
+│   │   │   │   ├── homer
+│   │   │   │   │   ├── replicate_id1_vs_control_id.dedup_status.macs2_narrow.motifs
+│   │   │   │   │   │   ├── homerResults
+│   │   │   │   │   │   └── knownResults
+│   │   │   │   │   ├── replicate_id1_vs_control_id.dedup_status.macs2_broad.motifs
+│   │   │   │   │   │   ├── homerResults
+│   │   │   │   │   │   └── knownResults
+│   │   │   │   │   ├── replicate_id2_vs_control_id.dedup_status.macs2_narrow.motifs
+│   │   │   │   │   │   ├── homerResults
+│   │   │   │   │   │   └── knownResults
+│   │   │   │   │   ├── replicate_id2_vs_control_id.dedup_status.macs2_broad.motifs
+│   │   │   │   │   │   ├── homerResults
+│   │   │   │   │   │   └── knownResults
+│   │   │   │   └── rose
+│   │   │   │       ├── replicate_id1_vs_control_id.dedup_status.macs2_broad.12500
+│   │   │   │       ├── replicate_id1_vs_control_id.dedup_status.macs2_narrow.12500
+│   │   │   │       ├── replicate_id2_vs_control_id.dedup_status.macs2_broad.12500
+│   │   │   │       ├── replicate_id2_vs_control_id.dedup_status.macs2_narrow.12500
+│   │   │   └── peak_output
+│   │   └── seacr
+│   │   │   ├── annotation
+│   │   │   │   ├── go_enrichment
+│   │   │   │   │   ├── contrast_id1.dedup_status.go_enrichment_tables
+│   │   │   │   │   └── contrast_id2.dedup_status.go_enrichment_html_report
+│   │   │   │   ├── homer
+│   │   │   │   │   ├── replicate_id1_vs_control_id.dedup_status.seacr_non_relaxed.motifs
+│   │   │   │   │   │   ├── homerResults
+│   │   │   │   │   │   └── knownResults
+│   │   │   │   │   ├── replicate_id1_vs_control_id.dedup_status.seacr_non_stringent.motifs
+│   │   │   │   │   │   ├── homerResults
+│   │   │   │   │   │   └── knownResults
+│   │   │   │   │   ├── replicate_id1_vs_control_id.dedup_status.seacr_norm_relaxed.motifs
+│   │   │   │   │   │   ├── homerResults
+│   │   │   │   │   │   └── knownResults
+│   │   │   │   │   ├── replicate_id1_vs_control_id.dedup_status.seacr_norm_stringent.motifs
+│   │   │   │   │   │   ├── homerResults
+│   │   │   │   │   │   └── knownResults
+│   │   │   │   │   ├── replicate_id2_vs_control_id.dedup_status.seacr_non_relaxed.motifs
+│   │   │   │   │   │   ├── homerResults
+│   │   │   │   │   │   └── knownResults
+│   │   │   │   │   ├── replicate_id2_vs_control_id.dedup_status.seacr_non_stringent.motifs
+│   │   │   │   │   │   ├── homerResults
+│   │   │   │   │   │   └── knownResults
+│   │   │   │   │   ├── replicate_id2_vs_control_id.dedup_status.seacr_norm_relaxed.motifs
+│   │   │   │   │   │   ├── homerResults
+│   │   │   │   │   │   └── knownResults
+│   │   │   │   │   ├── replicate_id2_vs_control_id.dedup_status.seacr_norm_stringent.motifs
+│   │   │   │   │   │   ├── homerResults
+│   │   │   │   │   │   └── knownResults
+│   │   │   │   └── rose
+│   │   │   │       ├── replicate_id1_vs_control_id.dedup_status.seacr_non_relaxed.12500
+│   │   │   │       ├── replicate_id1_vs_control_id.dedup_status.seacr_non_stringent.12500
+│   │   │   │       ├── replicate_id1_vs_control_id.dedup_status.seacr_norm_relaxed.12500
+│   │   │   │       ├── replicate_id1_vs_control_id.dedup_status.seacr_norm_stringent.12500
+│   │   │   │       ├── replicate_id2_vs_control_id.dedup_status.seacr_non_relaxed.12500
+│   │   │   │       ├── replicate_id2_vs_control_id.dedup_status.seacr_non_stringent.12500
+│   │   │   │       ├── replicate_id2_vs_control_id.dedup_status.seacr_norm_relaxed.12500
+│   │   │   │       ├── replicate_id2_vs_control_id.dedup_status.seacr_norm_stringent.12500
+│   │       └── peak_output
+└── qc
+    ├── fastqc_raw
+    └── fqscreen_raw
+

Last update: 2024-07-16
\ No newline at end of file diff --git a/2.6/user-guide/preparing-files/index.html b/2.6/user-guide/preparing-files/index.html new file mode 100644 index 0000000..c1081f5 --- /dev/null +++ b/2.6/user-guide/preparing-files/index.html @@ -0,0 +1,24 @@ + 2. Preparing Files - CARLISLE

2. Preparing Files

The pipeline is controlled through editing configuration and manifest files. Defaults are found in the /WORKDIR/config and /WORKDIR/manifest directories, after initialization.

2.1 Configs

The configuration files control parameters and software of the pipeline. These files are listed below:

  • config/config.yaml
  • resources/cluster.yaml
  • resources/tools.yaml

2.1.1 Cluster Config

The cluster configuration file dictates the resources to be used during submission to Biowulf HPC. There are two different ways to control these parameters - first, to control the default settings, and second, to create or edit individual rules. These parameters should be edited with caution, after significant testing.

2.1.2 Tools Config

The tools configuration file dictates the version of each software or program that is being used in the pipeline.

2.1.3 Config YAML

There are several groups of parameters that are editable for the user to control the various aspects of the pipeline. These are :

  • Folders and Paths
  • These parameters will include the input and output files of the pipeline, as well as list all manifest names.
  • User parameters
  • These parameters will control the pipeline features. These include thresholds and whether to perform processes.
  • References
  • These parameters will control the location of index files, spike-in references, adaptors and species calling information.

2.1.3.1 User Parameters

2.1.3.1.1 (Spike in Controls)

The pipeline allows for the use of a species specific spike-in control, or the use of normalization via library size. The parameter spikein_genome should be set to the species term used in spikein_reference.

For example for ecoli spike-in:

run_contrasts: true
+norm_method: "spikein"
+spikein_genome: "ecoli"
+spikein_reference:
+  ecoli:
+    fa: "PIPELINE_HOME/resources/spikein/Ecoli_GCF_000005845.2_ASM584v2_genomic.fna"
+

For example for drosophila spike-in:

run_contrasts: true
+norm_method: "spikein"
+spikein_genome: "drosophila"
+spikein_reference:
+  drosophila:
+    fa: "/fdb/igenomes/Drosophila_melanogaster/UCSC/dm6/Sequence/WholeGenomeFasta/genome.fa"
+

If it's determined that the amount of spike-in is not sufficient for the run, a library normalization can be performed.

  1. Complete a CARLISLE run with spike-in set to "Y". This will allow for the complete assessment of the spike-in.
  2. Run initial QC analysis on the output data
  3. Add the alignment_stats dir to the configuration file.
  4. Re-run the CARLISLE pipeline
2.1.3.1.2 Duplication Status

Users can select deduplicated peaks (dedup) or non-deduplicated peaks (no_dedup) through the user parameter.

dupstatus: "dedup, no_dedup"
+
2.1.3.1.3 Peak Caller

Three peak callers are available for deployment within the pipeline, with different settings deployed for each caller.

  1. MACS2 is available with two peak calling options: narrowPeak or broadPeak. NOTE: DESeq step generally fails for broadPeak; generally has too many calls.
peaktype: "macs2_narrow, macs2_broad"
+
  1. SEACR is available with four peak calling options: stringent or relaxed parameters, to be paired with "norm" for samples without a spike-in control and "non" for samples with a spikein control
peaktype: "seacr_stringent, seacr_relaxed"
+
  1. GOPEAKS is available with two peak calling options: narrowpeaks or broadpeaks
peaktype: "gopeaks_narrow, gopeaks_broad"
+

A complete list of the available peak calling parameters and the recommended list of parameters is provided below:

Peak Caller Narrow Broad Normalized, Stringent Normalized, Relaxed Non-Normalized, Stringent Non-Normalized, Relaxed
Macs2 AVAIL AVAIL NA NA NA NA
SEACR NA NA AVAIL w/o SPIKEIN AVAIL w/o SPIKEIN AVAIL w/ SPIKEIN AVAIL w/ SPIKEIN
GoPeaks AVAIL AVAIL NA NA NA NA
# Recommended list
+### peaktype: "macs2_narrow, macs2_broad, gopeaks_narrow, gopeaks_broad"
+
+# Available list
+### peaktype: "macs2_narrow, macs2_broad, seacr_norm_stringent, seacr_norm_relaxed, seacr_non_stringent, seacr_non_relaxed, gopeaks_narrow, gopeaks_broad"
+
2.1.3.1.3.1 Macs2 additional option

MACS2 can be run with or without the control. Adding a control will increase peak specificity. Selecting "Y" for the macs2_control will run the paired control sample provided in the sample manifest.

2.1.3.1.4 Quality Thresholds

Thresholds for quality can be controlled through the quality_thresholds parameter. This must be a list of comma separated values. A minimum of one numeric value is required.

#default values
+quality_thresholds: "0.1, 0.05, 0.01"
+

2.1.3.2 References

Additional reference files may be added to the pipeline, if other species were to be used.

The absolute file paths which must be included are:

  1. fa: "/path/to/species.fa"
  2. blacklist: "/path/to/blacklistbed/species.bed"

The following information must be included:

  1. regions: "list of regions to be included; IE chr1 chr2 chr3"
  2. macs2_g: "macs2 genome shorthand; IE mm IE hs"

2.2 Preparing Manifests

There are two manifests, one which is required for all pipelines and one that is only required if running a differential analysis. These files describe information on the samples and desired contrasts. The paths of these files are defined in the snakemake_config.yaml file. These files are:

  • samplemanifest
  • contrasts

2.2.1 Samples Manifest (REQUIRED)

This manifest will include information to sample level information. It includes the following column headers:

  • sampleName: the sample name WITHOUT replicate number (IE "SAMPLE")
  • replicateNumber: the sample replicate number (IE "1")
  • isControl: whether the sample should be identified as a control (IE "Y")
  • controlName: the name of the control to use for this sample (IE "CONTROL")
  • controlReplicateNumber: the replicate number of the control to use for this sample (IE "1")
  • path_to_R1: the full path to R1 fastq file (IE "/path/to/sample1.R1.fastq")
  • path_to_R2: the full path to R2 fastq file (IE "/path/to/sample2.R2.fastq")

An example sampleManifest file is shown below:

sampleName replicateNumber isControl controlName controlReplicateNumber path_to_R1 path_to_R2
53_H3K4me3 1 N HN6_IgG_rabbit_negative_control 1 PIPELINE_HOME/.test/53_H3K4me3_1.R1.fastq.gz PIPELINE_HOME/.test/53_H3K4me3_1.R2.fastq.gz
53_H3K4me3 2 N HN6_IgG_rabbit_negative_control 1 PIPELINE_HOME/.test/53_H3K4me3_2.R1.fastq.gz PIPELINE_HOME/.test/53_H3K4me3_2.R2.fastq.gz
HN6_H3K4me3 1 N HN6_IgG_rabbit_negative_control 1 PIPELINE_HOME/.test/HN6_H3K4me3_1.R1.fastq.gz PIPELINE_HOME/.test/HN6_H3K4me3_1.R2.fastq.gz
HN6_H3K4me3 2 N HN6_IgG_rabbit_negative_control 1 PIPELINE_HOME/.test/HN6_H3K4me3_2.R1.fastq.gz PIPELINE_HOME/.test/HN6_H3K4me3_2.R2.fastq.gz
HN6_IgG_rabbit_negative_control 1 Y - - PIPELINE_HOME/.test/HN6_IgG_rabbit_negative_control_1.R1.fastq.gz PIPELINE_HOME/.test/HN6_IgG_rabbit_negative_control_1.R2.fastq.gz

2.2.2 Contrast Manifest (OPTIONAL)

This manifest will include sample information to perform differential comparisons.

An example contrast file:

condition1 condition2
MOC1_siSmyd3_2m_25_HCHO MOC1_siNC_2m_25_HCHO

Note: you must have more than one sample per condition in order to perform differential analysis with DESeq2


Last update: 2024-07-16
\ No newline at end of file diff --git a/2.6/user-guide/run/index.html b/2.6/user-guide/run/index.html new file mode 100644 index 0000000..7a5b418 --- /dev/null +++ b/2.6/user-guide/run/index.html @@ -0,0 +1,18 @@ + 3. Running the Pipeline - CARLISLE

3. Running the Pipeline

3.1 Pipeline Overview

The Snakemake workflow has multiple options

Required arguments

Usage: bash ./data/CCBR_Pipeliner/Pipelines/CARLISLE/carlisle -m/--runmode=<RUNMODE> -w/--workdir=<WORKDIR>
+
+1.  RUNMODE: [Type: String] Valid options:
+    *) init : initialize workdir
+    *) run : run with slurm
+    *) reset : DELETE workdir dir and re-init it
+    *) dryrun : dry run snakemake to generate DAG
+    *) unlock : unlock workdir if locked by snakemake
+    *) runlocal : run without submitting to sbatch
+    *) runtest: run on cluster with included test dataset
+2.  WORKDIR: [Type: String]: Absolute or relative path to the output folder with write permissions.
+

Optional arguments

--help|-h : print this help. --version|-v : print the version of carlisle. --force|-f : use the force flag for snakemake to force all rules to run. --singcache|-c : singularity cache directory. Default is /data/${USER}/.singularity if available, or falls back to ${WORKDIR}/.singularity. Use this flag to specify a different singularity cache directory.

3.2 Commands explained

The following explains each of the command options:

  • Preparation Commands
  • init (REQUIRED): This must be performed before any Snakemake run (dry, local, cluster) can be performed. This will copy the necessary config, manifest and Snakefiles needed to run the pipeline to the provided output directory.
    • the -f/--force flag can be used in order to re-initialize a workdir that has already been created
  • dryrun (OPTIONAL): This is an optional step, to be performed before any Snakemake run (local, cluster). This will check for errors within the pipeline, and ensure that you have read/write access to the files needed to run the full pipeline.
  • Processing Commands
  • local: This will run the pipeline on a local node. NOTE: This should only be performed on an interactive node.
  • run: This will submit a master job to the cluster, and subsequent sub-jobs as needed to complete the workflow. An email will be sent when the pipeline begins, if there are any errors, and when it completes.
  • Other Commands (All optional)
  • unlock: This will unlock the pipeline if an error caused it to stop in the middle of a run.
  • runtest: This will run a test of the pipeline with test data

To run any of these commands, follow the the syntax:

bash ./data/CCBR_Pipeliner/Pipelines/CARLISLE/carlisle --runmode=COMMAND --workdir=/path/to/output/dir
+

3.3 Typical Workflow

A typical command workflow, running on the cluster, is as follows:

bash ./data/CCBR_Pipeliner/Pipelines/CARLISLE/carlisle --runmode=init --workdir=/path/to/output/dir
+
+bash ./data/CCBR_Pipeliner/Pipelines/CARLISLE/carlisle --runmode=dryrun --workdir=/path/to/output/dir
+
+bash ./data/CCBR_Pipeliner/Pipelines/CARLISLE/carlisle --runmode=run --workdir=/path/to/output/dir
+

Last update: 2024-09-10
\ No newline at end of file diff --git a/2.6/user-guide/test-info/index.html b/2.6/user-guide/test-info/index.html new file mode 100644 index 0000000..1ed744f --- /dev/null +++ b/2.6/user-guide/test-info/index.html @@ -0,0 +1,33 @@ + 5. Running Test Data - CARLISLE

5. Pipeline Tutorial

Welcome to the CARLISLE Pipeline Tutorial!

5.1 Getting Started

Review the information on the Getting Started page for a complete overview of the pipeline. The tutorial below will use test data available on NIH Biowulf HPC only. All example code will assume you are running v1.0 of the pipeline, using test data available on GitHub.

A. Change working directory to the CARLISLE repository

B. Initialize Pipeline

bash ./path/to/dir/carlisle --runmode=init --workdir=/path/to/output/dir
+

5.2 Submit the test data

Test data is included in the .test directory as well as the config directory.

A. Run the test command to prepare the data, perform a dry-run and submit to the cluster

bash ./path/to/dir/carlisle --runmode=runtest --workdir=/path/to/output/dir
+
  • An expected output for the runtest is as follows:
Job stats:
+job                              count    min threads    max threads
+-----------------------------  -------  -------------  -------------
+DESeq                                  24              1              1
+align                                   9             56             56
+alignstats                              9              2              2
+all                                     1              1              1
+bam2bg                                  9              4              4
+create_contrast_data_files             24              1              1
+create_contrast_peakcaller_files       12              1              1
+create_reference                        1             32             32
+create_replicate_sample_table           1              1              1
+diffbb                                 24              1              1
+filter                                 18              2              2
+findMotif                              96              6              6
+gather_alignstats                       1              1              1
+go_enrichment                          12              1              1
+gopeaks_broad                          16              2              2
+gopeaks_narrow                         16              2              2
+macs2_broad                            16              2              2
+macs2_narrow                           16              2              2
+make_counts_matrix                     24              1              1
+multiqc                                 2              1              1
+qc_fastqc                               9              1              1
+rose                                   96              2              2
+seacr_relaxed                          16              2              2
+seacr_stringent                        16              2              2
+spikein_assessment                      1              1              1
+trim                                    9             56             56
+total                                 478              1             56
+

5.3 Review outputs

Review the expected outputs on the Output page. If there are errors, review and perform the steps described on the Troubleshooting page as needed.


Last update: 2024-07-16
\ No newline at end of file diff --git a/2.6/user-guide/troubleshooting/index.html b/2.6/user-guide/troubleshooting/index.html new file mode 100644 index 0000000..9f0c492 --- /dev/null +++ b/2.6/user-guide/troubleshooting/index.html @@ -0,0 +1,9 @@ + Troubleshooting - CARLISLE

Troubleshooting

Recommended steps to troubleshoot the pipeline.

1.1 Email

Check your email for an email regarding pipeline failure. You will receive an email from slurm@biowulf.nih.gov with the subject: Slurm Job_id=[#] Name=CARLISLE Failed, Run time [time], FAILED, ExitCode 1

1.2 Review the log files

Review the logs in two ways:

  1. Review the master slurm file: This file will be found in the /path/to/results/dir/ and titled slurm-[jobid].out. Reviewing this file will tell you what rule errored, and for any local SLURM jobs, provide error details
  2. Review the individual rule log files: After reviewing the master slurm-file, review the specific rules that failed within the /path/to/results/dir/logs/. Each rule will include a .err and .out file, with the following formatting: {rulename}.{masterjobID}.{individualruleID}.{wildcards from the rule}.{out or err}

1.3 Restart the run

After addressing the issue, unlock the output directory, perform another dry-run and check the status of the pipeline, then resubmit to the cluster.

#unlock dir
+bash ./data/CCBR_Pipeliner/Pipelines/CARLISLE/carlisle --runmode=unlock --workdir=/path/to/output/dir
+
+#perform dry-run
+bash ./data/CCBR_Pipeliner/Pipelines/CARLISLE/carlisle --runmode=dryrun --workdir=/path/to/output/dir
+
+#submit to cluster
+bash ./data/CCBR_Pipeliner/Pipelines/CARLISLE/carlisle --runmode=run --workdir=/path/to/output/dir
+

1.4 Contact information

If after troubleshooting, the error cannot be resolved, or if a bug is found, please create an issue and send an email to Samantha Chill.


Last update: 2024-07-16
\ No newline at end of file diff --git a/latest b/latest new file mode 120000 index 0000000..c20c8ac --- /dev/null +++ b/latest @@ -0,0 +1 @@ +2.6 \ No newline at end of file diff --git a/versions.json b/versions.json new file mode 100644 index 0000000..5e4fcf9 --- /dev/null +++ b/versions.json @@ -0,0 +1,9 @@ +[ + { + "version": "2.6", + "title": "2.6", + "aliases": [ + "latest" + ] + } +]