From 4bcd6c3a40ade2ff94038d343fb9419ca570d386 Mon Sep 17 00:00:00 2001
From: Geoff Levand
Date: Wed, 12 Feb 2020 14:01:18 -0800
Subject: [PATCH] Add project files

Signed-off-by: Geoff Levand
---
 README.md | 113 +++
 images/distro-test-flow.png | Bin 0 -> 14085 bytes
 images/kernel-test-flow-2.png | Bin 0 -> 17577 bytes
 images/kernel-test-flow.png | Bin 0 -> 41938 bytes
 jenkins/jobs/distro/fedora/f28-t88-ks.cfg | 50 ++
 .../distro/fedora/f29-installer-test.groovy | 392 ++++++++++
 jenkins/jobs/distro/fedora/f29-qemu.ks | 61 ++
 .../distro/fedora/f30-installer-test.groovy | 393 ++++++++++
 jenkins/jobs/distro/fedora/f30-qemu.ks | 61 ++
 jenkins/jobs/kernel/kernel-test-matrix.groovy | 83 ++
 jenkins/jobs/kernel/kernel-test.groovy | 721 ++++++++++++++++++
 .../kernel/linux-4.19.y-stable-trigger.groovy | 12 +
 .../kernel/linux-4.20.y-stable-trigger.groovy | 12 +
 .../jobs/kernel/linux-mainline-trigger.groovy | 12 +
 jenkins/jobs/kernel/linux-next-trigger.groovy | 12 +
 jenkins/vars/fileCache.groovy | 127 +++
 jenkins/vars/kernelTrigger.groovy | 112 +++
 jenkins/vars/newFileCache.groovy | 179 +++++
 scripts/build-grub.sh | 313 ++++++++
 scripts/build-linux-kernel.sh | 418 ++++++++++
 scripts/build-rootfs.sh | 504 ++++++++++++
 scripts/build-tumbleweed-kernel.sh | 256 +++++++
 scripts/checkin.sh | 93 +++
 scripts/checkout-query.sh | 99 +++
 scripts/checkout.sh | 98 +++
 scripts/embed-initrd.sh | 181 +++++
 scripts/enter-tdd-jenkins-service.sh | 71 ++
 scripts/lib/checkout.sh | 102 +++
 scripts/lib/chroot.sh | 86 +++
 scripts/lib/ipmi.sh | 68 ++
 scripts/lib/relay.sh | 180 +++++
 scripts/lib/toolchain.sh | 53 ++
 scripts/lib/util.sh | 461 +++++++++++
 scripts/prepare-ld-scripts.sh | 147 ++++
 scripts/prepare-sysroot.sh | 121 +++
 scripts/relink.sh | 139 ++++
 scripts/rootfs-plugin/alpine.sh | 216 ++++++
 scripts/rootfs-plugin/coreos.sh | 214 ++++++
 scripts/rootfs-plugin/debian.sh | 233 ++++++
 scripts/rootfs-plugin/rootfs-plugin.sh | 6 +
 scripts/rpm2files.sh | 10 +
 scripts/run-builder.sh | 211 +++++
 scripts/run-fedora-qemu-tests.sh | 300 ++++++++
 scripts/run-kernel-qemu-tests.sh | 394 ++++++++++
 scripts/run-kernel-remote-tests.sh | 342 +++++++++
 scripts/run-shellcheck.sh | 11 +
 scripts/set-config-opts.sh | 137 ++++
 scripts/set-known-good-cache.sh | 33 +
 scripts/set-relay-triple.sh | 148 ++++
 scripts/set-systemd-debug.sh | 153 ++++
 scripts/start-qemu.sh | 368 +++++++++
 scripts/targets/arm64/tx2/set-numcores.sh | 1 +
 scripts/targets/arm64/tx2/set-smt.sh | 280 +++++++
 scripts/targets/arm64/tx2/set-turbo.sh | 1 +
 scripts/targets/arm64/tx2/tx2-fixup.spec | 34 +
 scripts/targets/generic-fixup.spec | 13 +
 scripts/tdd-bisect.sh | 107 +++
 scripts/tdd-run.conf | 19 +
 scripts/tdd-run.sh | 714 +++++++++++++++++
 scripts/test-plugin/http-wrk/http-wrk.sh | 97 +++
 scripts/test-plugin/ilp32/generic-test.sh | 179 +++++
 scripts/test-plugin/ilp32/ilp32.sh | 186 +++++
 scripts/test-plugin/ilp32/spec-cpu-test.sh | 79 ++
 scripts/test-plugin/ltp/ltp.sh | 207 +++++
 scripts/test-plugin/phoronix/phoronix.sh | 146 ++++
 scripts/test-plugin/sys-info/sys-info-test.sh | 76 ++
 scripts/test-plugin/sys-info/sys-info.sh | 82 ++
 scripts/test-plugin/test-plugin.sh | 12 +
 scripts/test-plugin/unixbench/unixbench.sh | 141 ++++
 scripts/tftp-upload.sh | 169 ++++
 scripts/upload-di.sh | 177 +++++
 scripts/upload-fedora-installer.sh | 198 +++++
 scripts/upload.conf-sample | 2 +
 scripts/useradd-jenkins.sh | 230 ++++++
 vars | 1 +
 75 files changed, 11657 insertions(+)
 create mode 100644 README.md
 create mode 100644 images/distro-test-flow.png
 create mode 100644 images/kernel-test-flow-2.png
 create mode 100644 images/kernel-test-flow.png
 create mode 100644 jenkins/jobs/distro/fedora/f28-t88-ks.cfg
 create mode 100644 jenkins/jobs/distro/fedora/f29-installer-test.groovy
 create mode 100644 jenkins/jobs/distro/fedora/f29-qemu.ks
 create mode 100644 jenkins/jobs/distro/fedora/f30-installer-test.groovy
 create mode 100644 jenkins/jobs/distro/fedora/f30-qemu.ks
 create mode 100644 jenkins/jobs/kernel/kernel-test-matrix.groovy
 create mode 100644 jenkins/jobs/kernel/kernel-test.groovy
 create mode 100644 jenkins/jobs/kernel/linux-4.19.y-stable-trigger.groovy
 create mode 100644 jenkins/jobs/kernel/linux-4.20.y-stable-trigger.groovy
 create mode 100644 jenkins/jobs/kernel/linux-mainline-trigger.groovy
 create mode 100644 jenkins/jobs/kernel/linux-next-trigger.groovy
 create mode 100644 jenkins/vars/fileCache.groovy
 create mode 100644 jenkins/vars/kernelTrigger.groovy
 create mode 100644 jenkins/vars/newFileCache.groovy
 create mode 100755 scripts/build-grub.sh
 create mode 100755 scripts/build-linux-kernel.sh
 create mode 100755 scripts/build-rootfs.sh
 create mode 100755 scripts/build-tumbleweed-kernel.sh
 create mode 100755 scripts/checkin.sh
 create mode 100755 scripts/checkout-query.sh
 create mode 100755 scripts/checkout.sh
 create mode 100755 scripts/embed-initrd.sh
 create mode 100755 scripts/enter-tdd-jenkins-service.sh
 create mode 100644 scripts/lib/checkout.sh
 create mode 100644 scripts/lib/chroot.sh
 create mode 100644 scripts/lib/ipmi.sh
 create mode 100644 scripts/lib/relay.sh
 create mode 100644 scripts/lib/toolchain.sh
 create mode 100644 scripts/lib/util.sh
 create mode 100755 scripts/prepare-ld-scripts.sh
 create mode 100755 scripts/prepare-sysroot.sh
 create mode 100755 scripts/relink.sh
 create mode 100644 scripts/rootfs-plugin/alpine.sh
 create mode 100644 scripts/rootfs-plugin/coreos.sh
 create mode 100644 scripts/rootfs-plugin/debian.sh
 create mode 100644 scripts/rootfs-plugin/rootfs-plugin.sh
 create mode 100755 scripts/rpm2files.sh
 create mode 100755 scripts/run-builder.sh
 create mode 100755 scripts/run-fedora-qemu-tests.sh
 create mode 100755 scripts/run-kernel-qemu-tests.sh
 create mode 100755 scripts/run-kernel-remote-tests.sh
 create mode 100755 scripts/run-shellcheck.sh
 create mode 100755 scripts/set-config-opts.sh
 create mode 100755 scripts/set-known-good-cache.sh
 create mode 100755 scripts/set-relay-triple.sh
 create mode 100755 scripts/set-systemd-debug.sh
 create mode 100755 scripts/start-qemu.sh
 create mode 120000 scripts/targets/arm64/tx2/set-numcores.sh
 create mode 100755 scripts/targets/arm64/tx2/set-smt.sh
 create mode 120000 scripts/targets/arm64/tx2/set-turbo.sh
 create mode 100644 scripts/targets/arm64/tx2/tx2-fixup.spec
 create mode 100644 scripts/targets/generic-fixup.spec
 create mode 100755 scripts/tdd-bisect.sh
 create mode 100644 scripts/tdd-run.conf
 create mode 100755 scripts/tdd-run.sh
 create mode 100644 scripts/test-plugin/http-wrk/http-wrk.sh
 create mode 100644 scripts/test-plugin/ilp32/generic-test.sh
 create mode 100644 scripts/test-plugin/ilp32/ilp32.sh
 create mode 100644 scripts/test-plugin/ilp32/spec-cpu-test.sh
 create mode 100644 scripts/test-plugin/ltp/ltp.sh
 create mode 100644 scripts/test-plugin/phoronix/phoronix.sh
 create mode 100644 scripts/test-plugin/sys-info/sys-info-test.sh
 create mode 100644 scripts/test-plugin/sys-info/sys-info.sh
 create mode 100644 scripts/test-plugin/test-plugin.sh
 create mode 100644 scripts/test-plugin/unixbench/unixbench.sh
 create mode 100755 scripts/tftp-upload.sh
 create mode 100755 scripts/upload-di.sh
 create mode 100644 scripts/upload-fedora-installer.sh
 create mode 100644 scripts/upload.conf-sample
 create mode 100755 scripts/useradd-jenkins.sh
 create mode 120000 vars

diff --git a/README.md b/README.md
new file mode 100644
index 00000000..e667756d
--- /dev/null
+++ b/README.md
@@ -0,0 +1,113 @@
+# The TDD project
+
+A framework for test driven Linux software development.
+
+## Jenkins Support
+
+For setup see the TDD Jenkins service [README](docker/jenkins/README.md).
+
+### Jenkins Kernel Tests:
+
+![Job Flow](images/kernel-test-flow.png)
+
+### Jenkins Distribution Tests:
+
+![Job Flow](images/distro-test-flow.png)
+
+## tftpd service
+
+The tdd-tftpd service is used in conjunction with a tdd-ipxe image installed
+on remote machines to provide an automated boot mechanism for remote machines
+controlled via Intelligent Platform Management Interface (IPMI) commands.
+
+For setup see the TDD tftpd service [README](docker/tftpd/README.md).
+
+## ipxe Support
+
+Remote machines have a custom [ipxe bootloader](https://ipxe.org) image
+installed. This custom image knows where the tdd-tftpd server is located and
+which files from the server are to be booted on that system. The system's
+UEFI is then configured to run this custom ipxe bootloader image on boot.
+
+For setup see the TDD ipxe [README](https://github.com/glevand/tdd--ipxe/blob/master/README).
+
+Use commands like these to build and install:
+
+```sh
+cd src
+make V=1 CROSS_COMPILE=aarch64-linux-gnu- ARCH=arm64 EMBED=tdd-boot-script -j $(getconf _NPROCESSORS_ONLN || echo 1) bin-arm64-efi/snp.efi
+scp bin-arm64-efi/snp.efi root@${remote}:/boot/efi/EFI/ipxe-tdd.efi
+ssh ${tftpd_server} mkdir -p /var/tftproot/${remote}/
+```
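+
+One way to set up the UEFI boot entry mentioned above is with `efibootmgr`,
+run from Linux on the remote machine. This is a sketch only; the disk,
+partition, and label below are examples rather than fixed TDD values:
+
+```sh
+# Create a boot entry that points at the installed iPXE image. Adjust the
+# disk and partition to match the machine's EFI system partition, then
+# verify the resulting boot order with 'efibootmgr -v'.
+sudo efibootmgr --create --disk /dev/sda --part 1 \
+    --label 'ipxe-tdd' --loader '\EFI\ipxe-tdd.efi'
+```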
+
+## relay service
+
+Once a remote machine has booted it needs to let the master know it is up and
+ready to receive commands, and if the remote machine was configured via DHCP
+it must provide its IP address to the master. The tdd-relay server is at a
+known network location and acts as a message relay server from the remote
+machine to the master. If there is a firewall between the master and any
+remote machines the relay service must be accessible from outside the
+firewall.
+
+For setup see the TDD relay service [README](docker/relay/README.md).
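+
+The check-in message format itself is defined by `scripts/checkin.sh` and
+`scripts/lib/relay.sh`. Purely as an illustration of the idea (the server
+name, port, and message text below are made-up placeholders, not the real
+tdd-relay protocol), a booted machine reporting its address might do
+something like:
+
+```sh
+relay_server='relay.example.com'   # placeholder address
+relay_port='9600'                  # placeholder port
+
+# Report the machine's first configured IP address so the master can reach it.
+my_addr="$(hostname -I | cut -d ' ' -f 1)"
+echo "checkin ${my_addr}" | nc "${relay_server}" "${relay_port}"
+```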
+
+## Build Host Setup
+
+### Host System binfmt support
+
+QEMU user mode emulation is used when cross building root filesystems. QEMU
+user mode binfmt support needs to be set up on the build host.
+
+#### Debian based systems
+
+For Debian based systems the following packages will install the needed binfmt
+support:
+
+```sh
+sudo apt-get install binfmt-support qemu-user-static
+sudo systemctl restart systemd-binfmt.service
+```
+
+#### Fedora based systems
+
+For Fedora based systems the following packages will install the needed binfmt
+support:
+
+```sh
+sudo dnf install qemu-user qemu-user-binfmt
+sudo systemctl restart systemd-binfmt.service
+```
+
+#### Other systemd based systems
+
+```sh
+echo ':qemu-aarch64:M::\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\xb7:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:/usr/bin/qemu-aarch64-static:' | sudo tee /etc/binfmt.d/qemu-aarch64.conf > /dev/null
+sudo systemctl restart systemd-binfmt.service
+```
+
+#### To test binfmt installation:
+
+```sh
+$ ls /proc/sys/fs/binfmt_misc
+qemu-aarch64 register status
+```
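+
+A quick functional check is to run an arm64 binary directly. This sketch
+assumes an arm64 root filesystem has already been unpacked at
+`/srv/arm64-rootfs` (any path works):
+
+```sh
+# With binfmt working, the arm64 binary runs transparently under QEMU.
+# On setups that register a non-static interpreter, qemu-aarch64-static may
+# first need to be copied into the root filesystem.
+sudo chroot /srv/arm64-rootfs /bin/uname -m   # prints: aarch64
+```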
+
+### Container and Service Setup
+
+The [docker-build-all.sh](docker/docker-build-all.sh) script will build all the
+TDD containers and can also install and enable the systemd services of those
+containers that have them. Individual containers and services can be built
+and/or set up with the container's build script,
+[build-jenkins.sh](docker/jenkins/build-jenkins.sh) for example.
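+
+For example (the scripts are assumed here to run without required arguments;
+check each script's usage output for the supported options):
+
+```sh
+cd docker
+# Build every TDD container image.
+./docker-build-all.sh
+# Or build and set up a single container, e.g. the Jenkins service.
+cd jenkins && ./build-jenkins.sh
+```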
z?`}NU#KJwj2qUb5(n=`{%3gHsTKB}=U4VKLg+fhXt3~G&Z9`syFiECBNJ6p$0dcB7 zYS?D7R#-$VrB|!m=4F9Po>pG%tCc~IjhaBwRLL}@jmbLO+J?8*-uS1#N5ODTMMcbe zzK93nKV>9tR(QAX9hz<$XVyHFcc&XpTG*RC1qhzFy67 zC-3Cc-4@u28Eh(1(K;w~v|nP19=dvXc(}QNI}-&-P=wqhBJy*=%G}COXO5hwexk!h zR(Ex5;2IJ?e;@+Fft(_Eh{T4FVDut)^za*GLC2{S97R5A{jOd!j2i3g_D5S zd`AqJdw8p{drfU>Dy9O5)1u-|YejyAPW~LLPuLHmqUDjs?Be1+PtS5va&q&~h)k92 z0i{ztr37cjNSTMYCF9X@Oey_K^GT8IbNIIfNRynqv#s)l$O95YypEx17#J9Es2m%! zAK)CtdGi-SvW0~Ob7c0{uRkXz>zZEvT?>^->(jyCs5wY95gi=y!S@Vfj%?dHY?|nv z=9(80td(JPTZb?ynIeX`iiyd$JpeHZk(`~K4V%7oHT{?sJ-xhGX$IDAbAl0F( z2PZ&1MvH>_7V7o5sHk)m=4VV1HQPTpFD7h3ADgVwuAtI*x_r$0kSEQm5E2OE={Nb|RlBnP|=!PK`t=4NKv zO<&~S%ztm2xZo>?kAnkNqPAl@S$oz%Aj+aw%_ksm3_Zts|E#L55Vb8tV>sNynK048 zJZ+b$`b)`*hLZnjG?`4r1#d~0p4FPy1!Y{wyb(|ZnbTqTES%+w~C9C(*eo>YaS11)AqkD&CG~1Ma<01Yz1&>tBD4x;x&J-I6gY!*SH8jOKPy_ zaLdolwKlkcL_j^zDimUyw_jlD;R;8Gjm@t8Z8btcWEh+Taybza8Yy_ZI=4a435wRJ z<=`O@<`6%EBw__#F?&n8%tDdBnH+#Z8tUmyY|lX%xF+ax?g=TdOK{^4)1YNplGlDr zLBR&Z(@k%peI96=&Q4D`a8F>TI*W*o55(ze-1oBYyl~YyJYFdvpyKOs@Gj7+ao=B! zNl2)1UK@cLqC%+Fo+_!^r8~!JNJupD)2CARz2%=jk8Z^@|XJg7WasC~1hKa?Su4aJctseq>@|{(vQP-{$7`yzMz4w7qB!olfbV z9+GsuA`&9`fSn!MGv>&5t;S$ZmuUo(!LJ-Vui`dr8e&0<7D^*JQJ^n66`aAfm(uG})x(k-p%hZq7nk?~5$W<8nq%R=^lG4*_Z z{P=)pE`zu4X&7rps{R%274&nINJ5`~@ z3NVC^oUHXKsM)88ZqG|Q*DgAMQesqyw;E@FA!cp*s#HclBtG}`G5($LrsJZ6e(*^? z=$RN8#2JYp^jy1G7!fSyhwh5s`}7d97Bo;$vp~+@Y+>NwP%IdNS|YQlOocg8j%jCW z%aDWItSdgt%e4xO6k2^~Le8P!fD%-z!Y=tJAX=E^W=?AA?BB*!V-_y@bSN{~n@!BTxJ2C};W$Hm0*KFIlzt}tTJ4+c(d~LGs zq9vR`AxT6S(mivefO!wCNBQo^x0eq{akie{0T*8Ay220H{sk0=RQOUUy&VaHWOTdp zakkLsv1%6oGUhZSAN=lOD|7)ZRI}h|cPw`f<6znJ7$IH!cW3#aw%I7>#IR^USjt&a|i< z9Vq9ULnsgGF3!Sa(!d*DLD}Q#U&yMjKl%IjZ-58djEgT_!J4{Vb>+$x_{nnfUV8k% zekdiVh24LD9DsT_54@@4^TW%22K%eSSy@>KIw&onVu9+^9-1BiN`{79pvA*u4rfw) zF?UPE^AKt;o3RS9jQ;N~8{S|SIc0Ap(0M^UPit;tV`FA!2YhvCn{N^7hUib9b|*YG zp5G(IdBbV{_gD0Mn=xn2$@=&1MB(7jP@aSJQuT|M|H{|@X>*No_ej~aN*AFt$H&9V zb17%^5EYK=3=Iw4-`FO#8oAKv2B1*T_)0u^ z0_CzTL>zPh8q%rmDdMx(lZ*z>po4*G2qKt2L4!t$m_OEi(sxk804$X#$K(Rt^^@S6 zWdy%O-FFvXjzZPBR-Pgm63g>)1p133h~PGV3NJMj`uh4_oFCM2#;o?HNWsTSAZxz~wvdoo|$#kq&aeemV`#6c5Y2fFxyJA>M` zF(($4R6pJ*u+Q+-kn6uTG6F>09Fs^yKG3WhAnVhwXySNSD zOfS_UW$yU@C_Ig|EQ>`Zf422|K@J&@^@Kh(Ee*OdF!C4fH5101+|k-}q5bAuq&SVs zh4-S{HCfcw_AZ%mQQ%?uGZ4H7Y$T=EJW{D(2(FGT*p@Y534OOm*g!Go_ed>f0ue5J z2n;5#%Cuf`#!#_PdAyo{ZYEVNCy&QQ`@*VQFGN(TKkH$4h|-(oPr3O|bP3c%YkJM8 z_#I}u5(GQP*(cl<5&%SrX*Nf9!&�)8U|CfnX9?{)TS5iuXvlDDw6fZf;@6-M(l5 z`@r&_o+|k;|IKZODnUM@*PP|Tire&YmP+4}lS{2rvqhN}agecXDI(g=L~S!iAScF{ zb6Dpw=H(v~;x17TdrA;Cgh+sSnsP_>O-i!=O3?RC{1~58px`AMHS8foh92Mxd?+j#iI*TGBqt}Yjg&pQYo{9MKI&kXYP_uFsCTSTWbBdm zBuyzVU)HL_*F)r!L3?=c)4Pr)CwS&>A!R@us%mX$`r_u-c7-Ci*Sy+(M$+3`BwX4W z>JKFH^&{=BT%9`a1Y|NJBVz^gmHNE@#@J_y4+8(8?Oa4}ZPBRvlm%@q8ld;h@H@mp ze<86ck4DeyYAct?*EY(p?d>OPU@PYdb8})~-esc?WSh?6@lSoM}crLbe2s zf0Q?UbG-`X;WFJ9H#w?3PV%Y5@iD((pnOJqcefAV5=p|!EpwQc{ThwjwvO_k{u;=R zT);yPj{VqCUPq?i|IR#-#9AcC!XDBggP^#%5{;0`32?RGXFwUf39?1W`#RVl?wKw4 zIymD;D!DUC?c6jadbcFZ(e-42njZk?YgsY zrZH6ZamPC~T+@zwp6|bI5uRGw+rN7r6(2D~x8LS+EqqK{D5LiLx&89&(-^q`pK8w| zPXAUo>X02y9LvAy2N|VWFQJH=IXwkD7WE-4Orc#Z54x1GO*}F@y8cgVRgb#E=S{S^ zIIU!Ux>sbZ|Mzafr82X#M=nk_=9P_sH|W-@S*>~AWxx?H`xHupLn**%^vTE_& zFOctD?P_0l^~r`x;LoH_Fty01_Blz20$go#+tBF#EK>{>%T_sVnfrrdOe;s~e&qRcg@>Xz*v1qajR_sOv74e&6c1 zoEz=`+(yML6~NXHxguIu2XdkrtoD}2M^8_GgWaU%Dp1BzTBa|U9K5qF)mp3k^l79Y zZo1d9tgBTa!d=6@Mt%ufvh&vP{tlMS>CPeQ?9k%WopGp2B3nlQP(7V*++;??)=BgKm0<32T z{+AD&PvoM)zsteVvvosUT%4Z@U*zR2MsEGigX&eiZiP9)n`;x*ZaV;gJxhp5Cm|yv z8!S$ndG_m2Z29pqv8S<;tX2vR-7er#?{{}4yG(Y57U$w4A{-n48sZGm69)luF2@w{ 
zl#w|^O}5)Neco+Pv!H(oplv-{4ssS6T<5%XVRs2BDRD0wOINS_%*=XG_pPH>z(j~>5mE{O1UA-(U$Eu&a>)_DT}RjTTb732sqqr@beY}xGkR;hm;Js# zeVEn?jciz07|C|O#O88S+L4CtyU6?02lzg=w8+C2-B7AjPYuIEiG+2+I(mwlcy7xw z${J+1F(*ZcWZa-BefZ+q+FFMv&%tr-&`M5rHZBi?9}zTn;fo^b%32{ueo9KK{66pMyn@B6OI=!S&gG#Dae0E)|dDavSkWJ&}SxyoP!!Pw2yq%}j&=qVn0H z2`~g4n}kVzcBp3Zu{c-%v5ubJ`N9wXs08BI{{AU1i~yHeOYEdnOiF0brmdfegA^i= zXAUGCU3hspujTn3KSZ$ND`%xzWe(rfeJX|yy#X1WZU`)!B%Zh9t8qX0#{1Uz(O2$! z*R%U`O|zZ)npG`qza{p)zOmcqPm0WZ(NlNXHoos<=(V-U%I_@URXO-xLz_glY6s$Y)q>Umzwie ztDh%{q4JSSaM*BHGlA;x#~pTIX*$$#*-?+>CY!voJx~EkQ*ajLd!M57 z)0DF0bReK+0-tWlw0}Y;o?&9Tasp^>%VE3eow{S_Cg@m zZmG7&yT(dKCEs%wJyqQ*+(55HK+gN69j)xJI@&ynu# z0>B4cZ54TkR@*A+bTpD`4o?Ac@YKraPt&WKZPAK8KWZN9BBCPxIkOeZ?86gVD#F{p zQ|wVTIaQ%!qv8bNj|<%cMoG{^G<>z#BS0`j@!wj2Wc7^+8*(}x6#IA8sM+}5*`CGA zYMG4lg`XA1Pw6|JRSV1kw(7-_Uz$lO^MQojEl!+ zzLjIT{^sZ6;vyi7@PHmQS#iC6&KZ*{t`py1^^q&ZLG2zXv>}I1ae#TY%3>rN*H<;1wA7U2%MPv02XRn<@e!NLanmS%{0@y1!Uh~nBI)D+*dzNBiesc`6!tT3oLxb$Kn0o70 zcH)FRc)lNxNsRGsyTAH{bJEk2t6WL>N^(G!MEPmQugwFaH(v1{9ug=IIZ%_L^U1)q zTUJg{;~Ys*pJ4O&95*_@#{>wvNw z`xb0oSH?du%)KXkx3_X8`RCWXb{7?X;A7gUGc1+t=%iDMgp=qJo#gtv8oBemBx#cC z`f%;JJ$^1Or@qjdn4xOcPZZ^VDVyQCzvSW5^Taa_~|vV2DtI>0u0n$Gwm_022%@HhH4-8 zzHA@XVObv&?6bGg?h0vqw(RID>;GBnb8ut0n7iI>s8{}}TpnK$`#-2!s=xlWeC6$3 zo8!3U0EiLlKGgULF5lo|$=% zi77yuQN&~48rojyRN29V-oFR-5ZWv-WZ=;{y1LM4N<=Hbo@VauRdP&^V&vqQ904wZ zY7lBeK>4DAe;L^WT@9&(M#ztvP^v--I;KKlP<6<>1 zd)&W&A3%beK@tGzqamgsiDMvp1z#=i0$u=b1Nr}fIL3=Jlt&q{52fz^kxoAex(9jTA87QotGAfyy zgB}2~DC8-?nZyvFF?j?;i~$FgfD@Yb?da~d?N7&xlsN;Qu1ssqw(cx|5I_ZJW`Ig~ z7ZjANlm%=4Hj*;e1R#Y6fKGtiL`FxKTl7mZ5(6_hs$jCwd9>^Os@&g+W?IbXn0Dk~992E?hwh!=%0MGBP4nG3E0x16^?_&$N zNf{X&B$`^~=jV4^=(>Tx1YEPUO~cZ1If&#gXaOQWf7UoNGAM`0O^Z@1&=Up}2KYY> zeA2j>m`cbl;m^n=IVL~3@VCHMqWOueGZ^6 zXe;dCn3`#PKvjZI#pg>{Jf4yU_oR656Ci*t%fGWqOLs?Y>k@!a2A&1r-c1lZX~8ZB z$L||G51YRBLi-ZNGE~@wun0sls58#b4kkh4gXNzLa2+BUZW)+pwSpo23OhqU$8^}r zfu#q$1)x^IV3{IX4{vZJb=N^lrg-mtaIiRZHNZ4);YvzNOT%5d z3F!fR3h1-m%zAD%eT*DFF7EEm&I#aWV1xPkb)ket|AY!8JRW$(NjW7O5n6{QG{X5fU?5_&{m)W z<>cff5X0KHT{#?|e2OHJyj@y03uHic=aEOVssSraw8`snrC%FFDhPU_?;+YR968&)@v^D&-s~{ zn4s=PbA4Yg(|wAJoST_xn6@)AGO`)3dP4}!D1CV5vp60AiB8!}VBbN*VA381P+O&r zO=U|TfTa^Y7iVxcJ!6Zn^{RIdfv<=722B1BgTi6!(>ID>E{Dp`fh1d2y9~S~K%`{) zo_>@0NCYqqEEX z&PQJUL5oqq#uEpW$ufkjhve^FCfovl7yyN3_KyM5PU(f!V9;?vaDq5QBj_y07OjvV zph`<PKT)2CF$d(gmV zBzap#7J+DlMf)3zu<-cgBu!oUn@1EOK)Qgs2F^Qz=^1Bt*t>TWzXw8FUJDL#X+|Y?_hZPA zK=KB41JwhNYk1p!pK$ho<&NWP=-uHnE|~s#;Q(UUG33h`malrkT5|t4?g>l$aA_`@ z*t}y*4Yp$AJxso^W(Gn;8BLiLE})s*HZ&Nr0BPnEap*9()lN<#P$J zd(9B5BNDNsICuZoS1>B-fiBTWxQb!f1I7z~sszZs5Y>lRqCr7d3N%`Xr!-^5ehH(# z>|dRcR5PS^8N=5KUFc+rx~|>9s23KyFv56z_B5>zuyxQh&cNxB2bh?e0zw4*HI0a8 zMP_CuWDhtSaBXyQ0x}<>GSmaGk2-2<-{6~||7mHFW{Ti9ZXrxlas%EoNTLs77zFO0 zInJ;QOm$rh-{VJh`^ANYKz;ZF(*7tJ+tmB*rFOF7AE;#_T1kN#qM}+|SU7+b z33xQrSa=N_JMG+i?lq5TX(L)aMJ9)actu6^f!1p@I@JwWT3X6KXU6a~C0JLugh3H; zG~mD3IEwvJKOfC+71SOx^YLliBL%tK7RaO^pdC_T6CnXDG^xbKaK%CI?lUXcb=fGE zYX}6I^4*L!(Sy1L_5;{z&>&!5rekE3V2%V+N>%l z!=JPsqlJwewpnJHPs^51Z{APce`mZGWLT7mnK6g=`!ehp2F4dGN!*upEW(L&yawzF z@ppUYo;ua(y(?M&L}F`9SUbN>fZw_;9M8l7#63mcf{Hzu|uleQ(3AaQ7|)gd-T z-f(yk^!tPE*G`r`@`otU$Mx#F%-vCH$hcB-i`2Zu{?5OT@b$SL4H8q8{oR;L7xxSz z(wz^MZ~kB$wTOI3-g_h5^2EgA&Cj=lRpAAR<@&`&sRitTv!y~;9)P5N$}dxeStNOA zMfWpaOuC$ko4AU-_zT6T2hme z3wiT1&*hW3R$ea-X&C;cOeWDGo>Ng*ohG4T8#?m9el^!|oZB0B?oxqEVDpFTXEcLn z%4rsJ+BTGYX!&PKFJ}>CMwzs$VKn-T;UnLyqCb4#_(sCZ^P%?CJO*j?F}pBLs%=n# zkY_K18NCi_MGRNk{ooxxt-Nmn^n!kwe~9{@=7eZ7N3RmVj>zXJ%a0O1Yx zwmG}_Y8OQU`JC?0*YX7P{3fG}`VnzVCsIes*Ku&1HzpFo!=Jb3gQ{B6{disXJM=lF 
zqI=D)=hhUL=u|XJG`zlEe#EUCl`JxP6&!{7ZKqL8Mv{ova)+*&Y}f5hy%g?yx1Z*6 z-wQJ>XpM@uTGr8Y-<}O}6QHH7kNJ+ZH&c4-$)p{#FX2x}Pt=?JyjFxt39b}0(~O7J zaA6kEiI0zOGnp(7!biaPI*Z7V(!iOEI+`dyQ0+QR;y0LKAYMB}{CYOJOY^FfY2dUa{ye}w!D1!KiavO(}a zed^_XZ7u(8%O4vdaWvZQHRm9lo)3FT&k*Mx_N9m~StP0Upn7Nfy`CpA(RhXTxUu_$ zJ=51`pzJ^S*y_CD`k{69(iLn;35hA1hq&L}c9??RiY&G^FEE;Mdv}{>c9;2sBXs{3 z5E9ya@~BpYxyJ7NPK_6(bo%{vwi?9@RdeI0x#rcr>(53|K&hOIxT|D&Qv1Dn8$9!~ zfvHnUd1%4RGPiKVm+))&HDUVC%#qA6C7=(I0TAfSwMe?}t#=x5Y%T#~Ba^jyt2I?Q z(S@stGLYi>?}ccz4~B8u)nn}R?nCxD*VIWkKf1kqle?oo1<{ zBSiAPSa)-3>RdmkP>M;nvAY#6O|^n@S}N?8Bio%P z%wFdP($MT18qy)(wi*?Emcx0|SFii4u^GhSeOubHu6B;JUe`nhlkVg_e?#aVpOloC zI8m>&wC4`=wV)18h$OUM7(%9fa@@LJwd%vfbeE9Q^-OfA4&Bdy##Y(9Yo396V@DEm zbs=0zZGEP$EpMgT|BJ}b74$yvB`fmT_oYHH3Min}Iy;O7-H&88VZd!CqM->R{Ah8z zN?B3X&R_4njWseeH8pkmR>>VgL;yZzlTndgB56eqQ+Br!3u)4(NgN@vip*AMb9*~` zB^|mnP$%~Ump?^(W_M2J{A5m9VkGXBe;a*K5ztG4prYJ%QiPY6_u$-Xr_6SS_P5Z! zXD-LX&tz8oRPt*-LQHHZA9)~%f__B-U@s*Vh(Zyr&fer=c=kN$TKa-Z#b%on$oQp7 z3#$mCtwXEO-QQaY&-0?9Z0g@;*iiY^=e%w-0u%xs&?KppNCjoZ6IA=*htk*N-# zB2&$IhM;4L2y(H@YSs{(*Xx#JBB4`Bb1%BibpOi};Rie+k~0pKkY8S*tB?XJU|T1f zY7M*7PP7_%=iW7A_eH-C)8;0mLw#UisF=W*a0I5aA@D=LZ=gTFlZ6(;{5SnD* zT5i`dT|Jc{sbs8u2KY*Pc5N+$klEwm<}*+0mFER6)&?SeGhdpyF}@H1Hu|UQmw3Eg zQvS4vu0F-spHT#cd{5hgTl1_?5I&tTHQTTO$E>lLU~K}LvIwl)3=v}Xn25yo;k?d= z9(?naBs5%Dd+z#nM%qrfT@u6Gax1x8YOqrI%R{4kfBxQ3hbG{8mWuP>2^py6=h|-U zG*5jXWC*Y!6QbEqe?gwIwS8UhUjfH3)#vr%%zdaLQ)lZ>_!JXx3DNjrm6wMM`VmCl zDyR=#<^mCUn}2sYZMR^cAuF1Vzc^g>;`D|0CNm!+s(B`%RgIhjMfi?XNmFI<`l%_`(xYId-`n#+0e z2GgNXI!blqNfKhfci$96Rzjp7gjb&U*iIToznmO6 zMab>f?vb|Di}@pwNEwr+$~xIu3S2r;8l*md8+O$M9}U_Fn(p?0(q%mq{=SQ3wJT71D#o$Jfj5Sp+~6(6GN~-apqp*LNeS1Se$7VoI-hVD zg0cDzlM1^xO3it&l_o|XkhTz zfYuZ#wch5U6K=$WBHT72=9MG;%f5=)g$>RJlbq~x4l`s@?TwM$8&9wONwpBJ)*~?c zxF!_wqP}M?F#C3N+KqQmUE}Pfd-DIp-YCS?`AWdr2J+~gPz+s+Wb*4m*ncj(AvnmtzlYQid#0amBg@#7#NT7&$YLwTdv2!azo=)PVU!Xjdz@nM1 zf%X0Yx_hc00ks`~m!1CQgHS_J;$mU>KiAM$oSSQ$w(AInX)w@W13qlxUZk`avNO+8$#a^ zqHFh*q}M1L+%qU`mpFd>rF-pCwRPN7IC3avHxt^D-vw=nru#}>c`jBqHU`GQzorZe zP+h>#aj|CB$|vV{jsTcmhZz(*yF&Sl(F*&S*H^Ct4kbZ?tu*sMM)7>!yYb0eUhRr_crZ zvY`;bI!IfdfItAr066b6eXOYg$X}mgW8W!=0vvI?yM%6bxh(pi<=Bb=WZ^TsMspn< zpP%dKSeI3)KU?oOtr=?n^Vq&IIa?;V5i8Auis z3e$5Qb`OZKIvAijodKaG8fdW|=H!X<+FMoy83c^MBnf*&{V*^#HKqSAbeRY}$hUoP z;B&S&bmZx+qZ6;e($(D^Et3Wj2h@6q|H9s+Brykur{s zjwHmyXym+ofL*`VbAMH{Mz79ct_`NcZsB4HB*@beZf0C;j=V+V(_qN0#uT_%?E~nx zY<&z&3{vPY2#a8#C!Wv#FDS@WnD_F^CtzA2L4qDFc!ykXZEX!YGFFnnH*aFdp1~B4 ze1>4jm=JC$Btc8s=yoC`C~g5%0z94yKLd>c8;_EPT}L23NHZP^=K>)%^9U9b*J)a$ zao7xodG-2D&z(+Q82sSj;bCWIr=_Lk;`-D)(`#NjvIF-+A1MQJatI@Jb)qr2L#Fj? z{y#oMMWIKV%WNknMn?rfxDBHiXs`u*07loIKkr4OFNB02K7XzN+N`vct9<|ztWaqJ zUW0{8UO(r69y&8MHU=TF%humUe{2Gf)~QNJ`~o)s?4QqNeQb4g74Y0x}7iLXztLwVKy zD=PTQM_xiw8jO01xXy_>A5rQ8&*P;GMDp_#Nf7m|1NQMofYuklA{@-u zKyQp#Z3O+VXzm1r4L+Dg@^103YS4ALlzs7l6VwPxOI! z7EsbSxpbHjgAfInBG8+c=(s>~dvUrT3SkKj8pf~15iiH9A{&kNHYP8CcOYZeqvEmt z+-rUcER*}e`t6%H_u&;azk_jCJUrj9mKI zn2ta~B5;Ed5ujbR6-ly<`Qt$Jix&^G5gViM8#ol|gI)nlWF5wX@0~%>EtwLIOqPq| z!K34XfmP35VE(p(0PucK{_yo<+7Hs)xKmXQb26S^(edkePaF{fBP$>b*UGyI8K2iC zp5IP z{tt{}b7S%D!*E2^K1ZSFV=@e>%_Gln>C^#Bt6PR28>S`xJzz?U0VU1&QI07mHy4QH z+S*!}f`ywB6x7aAIX*eT#>8xy10G)}L^2{ge0pI4J|2jSEmlGVCTKnSmzWbY-XG>AnW~|`5YKK)hrc=?@>W{H~EV`2oa)v8DrC!gh@^VP8sqZ zF$u}->ZN`#R!B8f5M6_BKsTIQ~! 
zGX_RG37X_?krQiBxDyA>)*MU#;SJ*xa6IQgn!$h?q)bru-X2r7Q`7GyQ(BF)mBVg=UVE4tLKO&6=46LCaQiLG_9;lWnsYmDFXV~9Z+M^& zH}^K^amX5=SG4{Aeow{)8io8XEdKwb+{O9_Q~uxifg{m(149va_^gD{Ykc}dULqf9 H@cMrM!kK=a literal 0 HcmV?d00001 diff --git a/images/kernel-test-flow.png b/images/kernel-test-flow.png new file mode 100644 index 0000000000000000000000000000000000000000..39c4a72626c62e685f3ade1e1b9f5a799a3d88b5 GIT binary patch literal 41938 zcmcG01y__&+wRb)lt>9fCaIN!6D!g6Mw+0VY~x~}`)Mkp&vVLv8)41qwfWu(R5K_Doc;P*vLWbk*HH_{FG z@yJ11+Zh7E>3aBsl=udR3<9Bo$cT%odHmh=b^oe9F>&{|V8PE~|1|LVZ-Phy6>(*8 z#!mrHKTpdkiwg6fMW%l3q9OG@6cg<>JXL)eiBG5|k`u00NZtE~6-&OQCBtvY{l6>t?*g#?#|r+t*8lal{(szpLj#n+h=9Pr z(@k=?d`$p>ggJ4)#2q4z{kwxs@A+x|+%e&hhXCbxlf-jkImmb*SBzvHIG+ri3` z!9hg<`7us)^`olNofrfpz)U;e>ThlI#t0`iq;iIl!X$Ow=%P8vYy-H>@Xocdoe^|?;JKtM!C5N*qsE%?p~I$&`mc$^i&dz4_*NjS5HrqN%}%q8V!4QCC9>fL%ieaKI%eHtwV zv4PCix;QXO^bIcuAj4nycVhG04Bg)-(I#A8AY$RMl=ppfSxbA_oAP72jV@asiS_Hf z{%y~}FVEwyDGKCMDy_z7i^%e=-sy-(liblH^w~_88y&Cqd2l4qDJ|4HnsAbKyGOH_ zEw=h1;IRkW+rMu;`7-i1y5tUKstQU=6{lPk#_ZZ|&*(BekXaO5(S;ec%9r+=4)V97 z?Xj`Z2oW70pOm$ny)VLAoZ!2mO_9&>>oI1Vt=(!_cIf941$%_y6EY|2@*}W}>il(l zpJ}P-rll)1Y~ecsb|LtCk`+r}nicEDZ}hv2f$u+c+62$TUl<0SYlL4CjYuv;)zt;` zRlb}I3H$H;tvN+MoY~TQ9k~L-@tc{LkXS4%-SK$ASGGp#rZ?53;LyM>y!nhX5!@<4 zk$j%5c&$D>HXHx%BxY$^nL%$n>eGVdx}?3IZ8ADQmsjrpxkK`R6qf6rO_#v8$VPgm$v(G81P)A;^Ic z{r-)UkDW#W>(V1(IXN$(Eulsiw|;t4>gujgDU~lPE5@AX^a*MVu2)+H8*6J3IbXA~ z>`6iy`$sG}6J=%MO|^+g8D-+dRm3ZH8Z$o#o$bwr#gDsYAYik#w|;H5vzM2bg=&#_ zLC1`*UuS=S{C=Xc{$Z*#L(toeHbHb=?Bl9uCh1~jWo5Ql*!srC*m?K?GE*f2W>=@9 zuAa_%d~$*mfcFwJh_Tkp(Lm#Sj9DQUwjT!s%8#)G(81^p#^pQzs*;)>% z%dpbo;{H9i+_^csIy3EB+a_J+{z^6VHHb$HA^;CVeyqc@pIu?L*0vKuRXOz}goi-k z&;z_zj4DI1AhDXU5fd+Y7ah{cjvw)bs>vf}Xs90pGCzz2!E|hzhXf%1dO;-Xi~6bpYA)r~5Ohx!6P z0s@H{D=jUx;ofm;YSAwn+Tg#xInjClzOk_pe3Irh!Q;Gluhk%WQ-wH)6Z0zV{z@Mq z%2rY}UIAx6KR-@VE-aI|ddC*;b99Irjhf!uaI4fl(^16Hw+_Tm)lA9IhAIKt5Vg!Z zDm)T4V^mV5G0vl2U3`}QN+Xg$kpMjWXvve46W$o-2Hn*ByfG^k0_@KA}F12Wo< zddRXo*HiN3l9~0JFQ(e={w@6q9;ud%AD@|tu#P<-7FIR?lp6Z>8)YH?_TS+EdSZ~=M`s!_txudH>J;XxRNG8=DW*g=i&8j z2;QmflKs&QtY%376{4V^5SjC>eWW+XsN*#f?=ZRh9GlHFYxP#|tj%nX1lnL~5EWHc ze#|@I1>CThlS`l8(H|l>L@BHa&nO?$(QIEIUHXjwJ`sq0^RijJZILos(lPT51H;=Z zvZ!t-Tr?(az}zzv7u9D8B4v|F4SZLj9X$pDKdJ0Tr3hcbQyF2>ap4u;k1X(AFcj4n zZ6~K{w<=1Ne>GNE{ih%KDe6m__~(#cjvg`6(1TIVCjn2wIxSy1Jnl+4_%~(H26jPQ zWp{U%CggN`;tBl8f-u}szig`3RxLZYK4{7{W8+eu_8BH>&@V%8Z~nn|FbLAOZ{Hvj zv#g&U^3-*{6*_WYUjiNObN25)en_jUZ}*EYBcL{^v`*x2G2bcq5dD^QOiWWOh_i~O z5cJv}FB~`c_%5ENuXb;t#XGlX?5XI#U$wSSUyBUaNp(%Vwe#zd>d2gMC~+)2Et|Pc znOM<#oDg zl$sAm%+%RaGSo8m7ZtrjM7-fl_m!2E1&NlKfE+H$E``xrRFq`)DnZ87)y&L{A{OI5 zB<#g406B=GrACP{PM<9h-jZgL%ZN3Q6_leLKiAgOjN}OcKazJa=N&(0eDmfBda$&l zH*k-gM)2!}35}Zp^#Q+ODmw@yt1N9{AsT zS6|q-{F}T!XuF>_i7hPRb6)?Wk}JuiQ(ID1RaIKLzn*4qXKTyE!Xg~xa!%ZK(G5!N z_1Rt&OG`z?Q7D<+#r%OE_zHCmjati*9Q9%i!Mh8Dz>tBGdwY8eP%JjO z?$kRX@)goFl$F2HKYn$353Gnvsw$x(K%pch1%F5pz)fpwYePdr4Ou~fs;QRQAA8$J3s62Bf8oHQCOM} zJN&q6gkr-`66orcYe#tDM>KKrZ6^!y;9+_2qoakrR}^^W36#FydUns0P(;z%3VZnI z$M)~IT|NpxSvt88+;7+M*KPK>bb6K00-io!F+*G=UEp#~C0DU|Bh#Bztqz!R*1pa6 zbaMn4@XhPjo~I*HPo6wcSGWD}fsE5qnLF)v&)#3hz<{OS$<@`Bp5V7BJBG?vY2TM| zm@!fcV=v=+QE&yyP2e#?zE^*~vgHEn$R#mq*E_tB+(R5)n8XTl`&>9wySjtj02?$? 
z1iSh!Ik(H?!QT8_)dY8$+rA-xH7hKhr+(v(QFDSeAZBVGBY&joa5-VWZqlzxOOs2dpl8&Bcqt$W9^hI`X`k4f)Bl5`tRdJ(a)CdV70Mi!e1k9>_HB!YrMh-it`{H-ooAUwc}$`F|CCoxYjI z>@7Z=-!gvMO>H?(EU$j4ZY9S-#pakt@_7yVqks$U@L+DPx!z~9DncqG@q~4o8lvNY zyA4d}P{pSOKhVuNe*c*!j?HYNk>b~#o{r0ySN1=YiTnMXDpM0FGcSS>>3_lfeQRr; zumU~UKCDGExuu#A(CLw#dYlt)ZZ$yNA^mJ*(Vtp`Go0vhq+D3gzfDr(r=Mi{jK4aZ zd0tEs!?X$sc^Hgmpk;7_?%a-`01rJ(%B`U|CN>S!jn&Wv`G9%gIKNve$j4G?`PzDt z3$G&KH27jb7@{4h~H}(z-^zyg~7WARSgyK&XJPn&8|XpuO6lyUNEA z0nWSJBX0ZRpW4InRr~L1U{hwLjP^_qWxCGD(2$*fRpor7@lifoCuYZZ&H06U_M6t; zhs*mKtq?Z6{!BNa(>isYJt*Ip?6v+-D9VCJujj}tVDVU36gj}i^N(l=AN zP}`+Fa=BtUtD0VeF?^5!zwExr)i3pqb62%Epqw)D=~H~8hetPS>a=((C}djtsTRQWztZ%XnJ?g z)Xc|Q7zS1K&EyC>m*2mKKUkD>(zU24s@vOo7fIwyN8;t{I~V3q*sTUn2ca2U!O=j| z8L)KxJUI)xupXT_AnWdYh+~JS5n-henuYn!ZW#aYnd8<11=-l@0~a% z$PEZxrG?}hjQODXk+7M5q%!a$Kw9~@K2*jcfS*iEP6~Z0!Bb>qWWbQ5>%P13rZjz6 z=7GIsVl8QH4Z+2Pw$Ps3C&h?U+tc5l>BxA6c(h2<&@cx6jo2$eTmeUn_NHpWR|en4 z)K2J*h-xqQ@jM!@hwW0aiD_#S-mhqa;i@dY0(@TRwR_h-^ZNB8wNVw^#~6gQRlS&$ zlu(-%=Z>H39HX4Vu5B3-@<;pB=iCGz+M#nJ1R>#mEcCeSng3oH(E0uJLiedLj73JE zp(jN7Q7S`*2aSz|Z|U`Kw7)>f7^cSm1Xg(;%~A&k!R#!O*RXCopC4zP?C1CSJYXzz zihp#EKU8on-DW}QFoM=w9LR6cw1zJc>IdEB&ZSVI>csKiw$75)aAkm@gjE-Ri!W2N zVQ-8}d;QF73XVAzOMZbulyO%ZCay%>M@E8s>t^qQF}B z;|Vmkbyi=(ZS9GQHtdYi&8&l-onM8Mes&!vlYCA6=(`{M7a${`zC)y>s@kU%X>8Wd z#NR0_R6&-L@&b&EMs%MzKA0hyhQGG*XADjj63v>+I!1v$(O5$`zoU`emvp;@Tq_bw zVM7~l9b=XIcTU>N#DVye^mN2YGAb$h+{e6t2WDb-Y~K#mCV)`R#c%0oICK!Gf)Now=3-WTeCKO-Ri)X2@i;ZdbW&3wNIOxRRA2T~}*Azsm< zdW(Oe2jv#|8Rnn__CSrh9yrze_wS+Jicbs6wodgc%6s>8+6UHaK*!em5EG8`OpRyH7+mfiTZO zJqMW)d7Ju?`)4ZSd5yR2H#uJ0z$zOcb$#s6yr?OGq^r?22Wd^u6v{tB5H88XUB>C! zWJYr|2b~NOkT0;*A=O<%h#7b93gT~{Z)9X_wN6p#gfaot?~97Ac_Cn9GXV?{12bG~ z^~+Q(S;H1HSvu{9nWdEQ{vBe9{cA*N;r&!?Y$i*iWg4}u9UQom`h^b>CQvSNC_W@? zx0^IH{j{8FqMseUR%P)ykaAtb zC~)lvzgJU}75{RmOjTMFNrl2xPA3fIuYUIwl9`C ztsc<8a~SG~q@@EwFFA_xY~<2Z?xRo>Cav5tQM_*aceAg#E8~8zbomUGw4t=a5kcP` zO1F1g7PcAe+kb=9!kFyVytpQ?${mpd?LKGhV$iCo<6WZQPBLqIh%pZ}m&d8`-C(3K z?(jFC5p5WY>tO*f~_tvEvvPR>j%b0(9U$0{KSPLfsvavqzvz zsCqu{uB$`~>pi5!sCvQDjdn^Yls0MJK-+|mvhQ-_)zp)_5|wpcXZ@L}dm?ent5fHs z>0r}LMs$%)+e0us`%zXoL9bsCLsl4nqtT@CuIpoR`>yIOnRn`*im6wpt;o^n8+p&U zG-J1t*YVTWPf>kStGu}{;h4@6=9RH_FGp!28Lvp=?Qxsk{e=A9iz(B*$~<~+w& zYvP>=GC~6CJ)$pG<0k>DBpx{E|NH*GzxvPr@Lze7e}CEk_mTYjll;$7{patqx2Gv~ ZQ9b$TaLV98w15c&3sV~t`krIw{sj)3uzLUi literal 0 HcmV?d00001 diff --git a/jenkins/jobs/distro/fedora/f28-t88-ks.cfg b/jenkins/jobs/distro/fedora/f28-t88-ks.cfg new file mode 100644 index 00000000..dbc9b3b3 --- /dev/null +++ b/jenkins/jobs/distro/fedora/f28-t88-ks.cfg @@ -0,0 +1,50 @@ +#version=DEVEL +ignoredisk --only-use=sdc +# Partition clearing information +clearpart --all --initlabel --drives=sdc +# Use text mode install +text +# Use network installation +url --url="http://10.7.8.51/Fedora-server-28" +# Keyboard layouts +keyboard --vckeymap=us --xlayouts='' +# System language +lang en_US.UTF-8 + +# Network information +network --bootproto=dhcp --device=enP2p1s0f1 --ipv6=auto --activate +network --bootproto=dhcp --device=enP2p1s0f2 --ipv6=auto +network --bootproto=dhcp --device=enP2p1s0f3 --ipv6=auto +network --bootproto=dhcp --device=enP2p1s0f4 --ipv6=auto +network --bootproto=dhcp --device=enP2p1s0f5 --ipv6=auto --activate +network --bootproto=dhcp --device=enP6p1s0f1 --ipv6=auto +network --bootproto=dhcp --device=enP6p1s0f2 --ipv6=auto +network --hostname=localhost.localdomain +# Root password +rootpw --iscrypted $6$lMKgWWMW2ORdmzL7$HQKTznJooVSSiE0/2AOkLkw6brrUkRCSvydfN7S98nV81eRb3J0CQ/ZcZEbztJcB0d19K.ZryzhrbJL6imdH/. 
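+# The value above is a SHA-512 crypt hash. As a sketch only, a replacement
+# hash can be generated with, e.g., 'openssl passwd -6' (assumes OpenSSL
+# 1.1.1 or later); paste the resulting string after --iscrypted.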
+# Run the Setup Agent on first boot +firstboot --enable +# Do not configure the X Window System +skipx +# System services +services --enabled="chronyd" +# System timezone +timezone America/Los_Angeles --isUtc +user --groups=docker,wheel --name=tester --password=$6$X9a.a329c6xSGjn4$5u3YAhUipvNgzGNWs3nPQSqPFLq.UTlPK/iHDZ/Xmp2ex7lMGgHH.kKxDWPFZaoKZYoVWNtfGdX59yve8gLgn/ --iscrypted +# System bootloader configuration +bootloader --location=mbr --boot-drive=sdc + +%packages +@^server-product-environment + +%end + +%addon com_redhat_kdump --disable --reserve-mb='128' + +%end + +%anaconda +pwpolicy root --minlen=6 --minquality=1 --notstrict --nochanges --notempty +pwpolicy user --minlen=6 --minquality=1 --notstrict --nochanges --emptyok +pwpolicy luks --minlen=6 --minquality=1 --notstrict --nochanges --notempty +%end diff --git a/jenkins/jobs/distro/fedora/f29-installer-test.groovy b/jenkins/jobs/distro/fedora/f29-installer-test.groovy new file mode 100644 index 00000000..94d3113a --- /dev/null +++ b/jenkins/jobs/distro/fedora/f29-installer-test.groovy @@ -0,0 +1,392 @@ +#!groovy +// Test install of Fedora. + + +script { + library identifier: "tdd-project@master", retriever: legacySCM(scm) +} + +String test_machine = 'gbt2s19' + +pipeline { + parameters { + booleanParam(name: 'DOCKER_PURGE', + defaultValue: false, + description: 'Remove existing tdd builder image and rebuild.') + string(name: 'FEDORA_KICKSTART_URL', + defaultValue: '', + description: 'URL of an alternate Anaconda kickstart file.') + booleanParam(name: 'FORCE', + defaultValue: false, + description: 'Force tests to run.') + string(name: 'FEDORA_INITRD_URL', + //defaultValue: 'https://dl.fedoraproject.org/pub/fedora/linux/development/29/Server/aarch64/os/images/pxeboot/initrd.img', + defaultValue: 'https://download.fedoraproject.org/pub/fedora/linux/development/29/Server/aarch64/os/images/pxeboot/initrd.img', + description: 'URL of Fedora Anaconda initrd.') + //string(name: 'FEDORA_ISO_URL', // TODO: Add iso support. + // defaultValue: 'https://dl.fedoraproject.org/pub/fedora/linux/development/29/Server/aarch64/iso/Fedora-Server-netinst-aarch64-29-???.iso', + // description: 'URL of Fedora Anaconda CD-ROM iso.') + string(name: 'FEDORA_KERNEL_URL', + //defaultValue: 'https://dl.fedoraproject.org/pub/fedora/linux/development/29/Server/aarch64/os/images/pxeboot/vmlinuz', + defaultValue: 'https://download.fedoraproject.org/pub/fedora/linux/development/29/Server/aarch64/os/images/pxeboot/vmlinuz', + description: 'URL of Fedora Anaconda kernel.') + booleanParam(name: 'RUN_QEMU_TESTS', + defaultValue: true, + description: 'Run kernel tests in QEMU emulator.') + booleanParam(name: 'RUN_REMOTE_TESTS', + defaultValue: false, + description: 'Run kernel tests on remote test machine.') + choice(name: 'TARGET_ARCH', + choices: "arm64\namd64\nppc64le", + description: 'Target architecture to build for.') + string(name: 'PIPELINE_BRANCH', + defaultValue: 'master', + description: 'Branch to use for fetching the pipeline jobs') + } + + options { + // Timeout if no node available. 
+ timeout(time: 90, unit: 'MINUTES') + //timestamps() + buildDiscarder(logRotator(daysToKeepStr: '10', numToKeepStr: '5')) + } + + environment { + String tddStorePath = sh( + returnStdout: true, + script: "set -x; \ +if [ \${TDD_STORE} ]; then \ + echo -n \${TDD_STORE}; \ +else \ + echo -n /run/tdd-store/\${USER}; \ +fi") + jenkinsCredsPath = "${env.tddStorePath}/jenkins_creds" + String dockerCredsExtra = "-v ${env.jenkinsCredsPath}/group:/etc/group:ro \ + -v ${env.jenkinsCredsPath}/passwd:/etc/passwd:ro \ + -v ${env.jenkinsCredsPath}/shadow:/etc/shadow:ro \ + -v ${env.jenkinsCredsPath}/sudoers.d:/etc/sudoers.d:ro" + String dockerSshExtra = sh( + returnStdout: true, + script: "set -x; \ +if [ \${TDD_JENKINS} ]; then \ + echo -n ' '; \ +else \ + user=\$(id --user --real --name); \ + echo -n '-v /home/\${user}/.ssh:/home/\${user}/.ssh'; \ +fi") + String dockerTag = sh( + returnStdout: true, + script: './docker/builder/build-builder.sh --tag').trim() + String qemu_out = "qemu-console.txt" + String remote_out = test_machine + "-console.txt" + String tftp_initrd = 'tdd-initrd' + String tftp_kickstart = 'tdd-kickstart' + String tftp_kernel = 'tdd-kernel' + } + + agent { + //label "${params.NODE_ARCH} && docker" + label 'master' + } + + stages { + + stage('setup') { + steps { /* setup */ + tdd_setup_jenkins_creds() + } + } + + stage('parallel-setup') { + failFast false + parallel { /* parallel-setup */ + + stage('download-files') { + steps { /* download-files */ + tdd_print_debug_info("start") + + copyArtifacts( + projectName: "${JOB_NAME}", + selector: lastCompleted(), + fingerprintArtifacts: true, + optional: true, + ) + + sh("""#!/bin/bash +export PS4='+ [${STAGE_NAME}] \${BASH_SOURCE##*/}:\${LINENO}: ' +set -ex + +rm -f ${env.tftp_initrd} ${env.tftp_kickstart} ${env.tftp_kernel} + +if [[ -n "${params.FEDORA_KICKSTART_URL}" ]]; then + curl --silent --show-error --location ${params.FEDORA_KICKSTART_URL} > ${env.tftp_kickstart} +else + cp jenkins/jobs/distro/fedora/f29-qemu.ks ${env.tftp_kickstart} +fi +curl --silent --show-error --location ${params.FEDORA_INITRD_URL} > ${env.tftp_initrd} +curl --silent --show-error --location ${params.FEDORA_KERNEL_URL} > ${env.tftp_kernel} + +if [[ -f md5sum.txt ]]; then + last="\$(cat md5sum.txt)" +fi + +current=\$(md5sum ${env.tftp_initrd} ${env.tftp_kernel}) + +set +x +echo '------' +echo "last = \n\${last}" +echo "current = \n\${current}" +ls -l ${env.tftp_initrd} ${env.tftp_kernel} +echo '------' +set -x + +if [[ "${params.FORCE}" == 'true' || -z "\${last}" \ + || "\${current}" != "\${last}" ]]; then + echo "${STAGE_NAME}: Need test." + echo "\${current}" > md5sum.txt + echo "yes" > need-test +else + echo "${STAGE_NAME}: No change." 
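+    # need-test gates the test stages below; their 'when' blocks call readFile('need-test').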
+ echo "no" > need-test +fi +""") + } + post { /* download-files */ + success { + archiveArtifacts( + artifacts: "md5sum.txt", + fingerprint: true + ) + } + cleanup { + echo "${STAGE_NAME}: done: ${currentBuild.currentResult}" + } + } + } + + stage('build-builder') { + steps { /* build-builder */ + tdd_print_debug_info("start") + tdd_print_result_header() + + echo "${STAGE_NAME}: dockerTag=@${env.dockerTag}@" + + sh("""#!/bin/bash -ex +export PS4='+ [${STAGE_NAME}] \${BASH_SOURCE##*/}:\${LINENO}: ' + +tag=${env.dockerTag} +docker images \${tag%:*} + +[[ "${params.DOCKER_PURGE}" != 'true' ]] || build_args=' --purge' + +./docker/builder/build-builder.sh \${build_args} + +""") + } + post { /* build-builder */ + cleanup { + echo "${STAGE_NAME}: done: ${currentBuild.currentResult}" + } + } + } + } + } + + stage('parallel-test') { + failFast false + parallel { /* parallel-test */ + + stage('remote-tests') { + + when { + expression { return params.RUN_REMOTE_TESTS == true \ + && readFile('need-test').contains('yes') } + } + + stages { /* remote-tests */ + + stage('upload-files') { + steps { + echo "${STAGE_NAME}: start" + tdd_upload_tftp_files('tdd-tftp-login-key', + env.tftp_server, env.tftp_root, + env.tftp_initrd + ' ' + env.tftp_kernel + ' ' + + env.tftp_kickstart) + } + } + + stage('run-remote-tests') { + + agent { /* run-remote-tests */ + docker { + image "${env.dockerTag}" + args "--network host \ + ${env.dockerCredsExtra} \ + ${env.dockerSshExtra} \ + " + reuseNode true + } + } + + environment { /* run-remote-tests */ + TDD_BMC_CREDS = credentials("${test_machine}_bmc_creds") + } + + options { /* run-remote-tests */ + timeout(time: 90, unit: 'MINUTES') + } + + steps { /* run-remote-tests */ + echo "${STAGE_NAME}: start" + tdd_print_debug_info("${STAGE_NAME}") + tdd_print_result_header() + + script { + sh("""#!/bin/bash -ex +export PS4='+ [${STAGE_NAME}] \${BASH_SOURCE##*/}:\${LINENO}: ' + +echo "--------" +printenv | sort +echo "--------" + +echo "${STAGE_NAME}: TODO" +""") + currentBuild.result = 'FAILURE' // FIXME. + } + } + + post { /* run-remote-tests */ + cleanup { + archiveArtifacts( + artifacts: "${STAGE_NAME}-result.txt, ${env.remote_out}", + fingerprint: true) + echo "${STAGE_NAME}: done: ${currentBuild.currentResult}" + } + } + } + } + } + + stage('run-qemu-tests') { + when { + expression { return params.RUN_QEMU_TESTS == true \ + && readFile('need-test').contains('yes') } + } + + agent { /* run-qemu-tests */ + docker { + image "${env.dockerTag}" + args "--network host \ + ${env.dockerCredsExtra} \ + ${env.dockerSshExtra} \ + " + reuseNode true + } + } + + options { /* run-qemu-tests */ + timeout(time: 90, unit: 'MINUTES') + } + + steps { /* run-qemu-tests */ + tdd_print_debug_info("start") + tdd_print_result_header() + + sh("""#!/bin/bash +export PS4='+ [${STAGE_NAME}] \${BASH_SOURCE##*/}:\${LINENO}: ' +set -ex + +rm -f ${env.qemu_out} +touch ${env.qemu_out} + +rm -f fedora.hda +qemu-img create -f qcow2 fedora.hda 20G + +rm -f test-login-key +ssh-keygen -q -f test-login-key -N '' + +scripts/run-fedora-qemu-tests.sh \ + --arch=${params.TARGET_ARCH} \ + --initrd=${env.tftp_initrd} \ + --kernel=${env.tftp_kernel} \ + --kickstart=${env.tftp_kickstart} \ + --out-file=${env.qemu_out} \ + --hda=fedora.hda \ + --ssh-key=test-login-key \ + --verbose + +""") + } + + post { /* run-qemu-tests */ + success { + script { + if (readFile("${env.qemu_out}").contains('reboot: Power down')) { + echo "${STAGE_NAME}: FOUND 'reboot' message." 
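+                        // 'reboot: Power down' appears on the console only after the
+                        // kickstart's 'poweroff' step, i.e. the install ran to completion.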
+ } else { + echo "${STAGE_NAME}: DID NOT FIND 'reboot' message." + currentBuild.result = 'FAILURE' + } + } + } + cleanup { + archiveArtifacts( + artifacts: "${STAGE_NAME}-result.txt, ${env.qemu_out}", + fingerprint: true) + echo "${STAGE_NAME}: done: ${currentBuild.currentResult}" + } + } + } + } + } + } +} + +void tdd_setup_jenkins_creds() { + sh("""#!/bin/bash -ex +export PS4='+ [${STAGE_NAME}] \${BASH_SOURCE##*/}:\${LINENO}: ' + +sudo mkdir -p ${env.jenkinsCredsPath} +sudo chown \$(id --user --real --name): ${env.jenkinsCredsPath}/ +sudo cp -avf /etc/group /etc/passwd /etc/shadow /etc/sudoers.d ${env.jenkinsCredsPath}/ +""") +} + +void tdd_print_debug_info(String info) { + sh("""#!/bin/bash -ex +echo '${STAGE_NAME}: ${info}' +whoami +id +sudo true +""") +} + +void tdd_print_result_header() { + sh("""#!/bin/bash -ex + +echo "node=${NODE_NAME}" > ${STAGE_NAME}-result.txt +echo "--------" >> ${STAGE_NAME}-result.txt +echo "printenv" >> ${STAGE_NAME}-result.txt +echo "--------" >> ${STAGE_NAME}-result.txt +printenv | sort >> ${STAGE_NAME}-result.txt +echo "--------" >> ${STAGE_NAME}-result.txt +""") +} + + +void tdd_upload_tftp_files(String keyId, String server, String root, String files) { + echo 'upload_tftp_files: key = @' + keyId + '@' + echo 'upload_tftp_files: root = @' + root + '@' + echo 'upload_tftp_files: files = @' + files + '@' + + sshagent (credentials: [keyId]) { + sh("""#!/bin/bash -ex + +ssh ${server} ls -lh ${root} +for f in "${files}"; do + scp \${f} ${server}:${root}/\${f} +done +ssh ${server} ls -lh ${root} +""") + } +} diff --git a/jenkins/jobs/distro/fedora/f29-qemu.ks b/jenkins/jobs/distro/fedora/f29-qemu.ks new file mode 100644 index 00000000..0936d1db --- /dev/null +++ b/jenkins/jobs/distro/fedora/f29-qemu.ks @@ -0,0 +1,61 @@ +#version=DEVEL +ignoredisk --only-use=vda +# System bootloader configuration +bootloader --location=mbr --boot-drive=vda --append "text" +autopart --type=plain +# Partition clearing information +clearpart --drives=vda --all +# Use text mode install +text +# Use network installation +url --url="https://dl.fedoraproject.org/pub/fedora/linux/development/29/Server/aarch64/os/" +# Keyboard layouts +keyboard --vckeymap=us --xlayouts='' +# System language +lang en_US.UTF-8 + +# Network information +network --bootproto=dhcp --device=eth0 --ipv6=auto --activate +network --hostname=f29-install-test +# Root password +rootpw --plaintext r +# Run the Setup Agent on first boot +firstboot --disable +# Do not configure the X Window System +skipx +# System services +services --enabled="chronyd" +# System timezone +timezone America/Los_Angeles --isUtc +user --groups=sudo,docker,wheel --name=tdd-tester + +firewall --disabled + +poweroff + +%packages +@^server-product-environment + +%end + +#%addon com_redhat_kdump --disable --reserve-mb='128' +# +#%end + +%anaconda +pwpolicy root --minlen=6 --minquality=1 --notstrict --nochanges --notempty +pwpolicy user --minlen=6 --minquality=1 --notstrict --nochanges --emptyok +pwpolicy luks --minlen=6 --minquality=1 --notstrict --nochanges --notempty +%end + +%post + +# Setup ssh keys +mkdir -p -m0700 /root/.ssh/ +cat >> /root/.ssh/authorized_keys << EOF +@@ssh-keys@@ +EOF +chmod 0600 /root/.ssh/authorized_keys +restorecon -R /root/.ssh/ + +%end diff --git a/jenkins/jobs/distro/fedora/f30-installer-test.groovy b/jenkins/jobs/distro/fedora/f30-installer-test.groovy new file mode 100644 index 00000000..6311f908 --- /dev/null +++ b/jenkins/jobs/distro/fedora/f30-installer-test.groovy @@ -0,0 +1,393 @@ +#!groovy +// Test 
install of Fedora. + + +script { + library identifier: "tdd@master", retriever: legacySCM(scm) +} + +String test_machine = 'gbt2s19' + +pipeline { + parameters { + booleanParam(name: 'DOCKER_PURGE', + defaultValue: false, + description: 'Remove existing tdd builder image and rebuild.') + string(name: 'FEDORA_KICKSTART_URL', + defaultValue: '', + description: 'URL of an alternate Anaconda kickstart file.') + booleanParam(name: 'FORCE', + defaultValue: false, + description: 'Force tests to run.') + string(name: 'FEDORA_INITRD_URL', + //defaultValue: 'https://dl.fedoraproject.org/pub/fedora/linux/releases/30/Server/aarch64/os/images/pxeboot/initrd.img', + defaultValue: 'https://download.fedoraproject.org/pub/fedora/linux/releases/30/Server/aarch64/os/images/pxeboot/initrd.img', + description: 'URL of Fedora Anaconda initrd.') + //string(name: 'FEDORA_ISO_URL', // TODO: Add iso support. + // defaultValue: 'https://dl.fedoraproject.org/pub/fedora/linux/releases/30/Server/aarch64/iso/Fedora-Server-netinst-aarch64-30-1.2.iso', + // defaultValue: 'https://download.fedoraproject.org/pub/fedora/linux/releases/30/Server/aarch64/iso/Fedora-Server-netinst-aarch64-30-1.2.iso', + // description: 'URL of Fedora Anaconda CD-ROM iso.') + string(name: 'FEDORA_KERNEL_URL', + //defaultValue: 'https://dl.fedoraproject.org/pub/fedora/linux/releases/30/Server/aarch64/os/images/pxeboot/vmlinuz', + defaultValue: 'https://download.fedoraproject.org/pub/fedora/linux/releases/30/Server/aarch64/os/images/pxeboot/vmlinuz', + description: 'URL of Fedora Anaconda kernel.') + booleanParam(name: 'RUN_QEMU_TESTS', + defaultValue: true, + description: 'Run kernel tests in QEMU emulator.') + booleanParam(name: 'RUN_REMOTE_TESTS', + defaultValue: false, + description: 'Run kernel tests on remote test machine.') + choice(name: 'TARGET_ARCH', + choices: "arm64\namd64\nppc64le", + description: 'Target architecture to build for.') + string(name: 'PIPELINE_BRANCH', + defaultValue: 'master', + description: 'Branch to use for fetching the pipeline jobs') + } + + options { + // Timeout if no node available. 
+ timeout(time: 90, unit: 'MINUTES') + //timestamps() + buildDiscarder(logRotator(daysToKeepStr: '10', numToKeepStr: '5')) + } + + environment { + String tddStorePath = sh( + returnStdout: true, + script: "set -x; \ +if [ \${TDD_STORE} ]; then \ + echo -n \${TDD_STORE}; \ +else \ + echo -n /run/tdd-store/\${USER}; \ +fi") + jenkinsCredsPath = "${env.tddStorePath}/jenkins_creds" + String dockerCredsExtra = "-v ${env.jenkinsCredsPath}/group:/etc/group:ro \ + -v ${env.jenkinsCredsPath}/passwd:/etc/passwd:ro \ + -v ${env.jenkinsCredsPath}/shadow:/etc/shadow:ro \ + -v ${env.jenkinsCredsPath}/sudoers.d:/etc/sudoers.d:ro" + String dockerSshExtra = sh( + returnStdout: true, + script: "set -x; \ +if [ \${TDD_JENKINS} ]; then \ + echo -n ' '; \ +else \ + user=\$(id --user --real --name); \ + echo -n '-v /home/\${user}/.ssh:/home/\${user}/.ssh'; \ +fi") + String dockerTag = sh( + returnStdout: true, + script: './docker/builder/build-builder.sh --tag').trim() + String qemu_out = "qemu-console.txt" + String remote_out = test_machine + "-console.txt" + String tftp_initrd = 'tdd-initrd' + String tftp_kickstart = 'tdd-kickstart' + String tftp_kernel = 'tdd-kernel' + } + + agent { + //label "${params.NODE_ARCH} && docker" + label 'master' + } + + stages { + + stage('setup') { + steps { /* setup */ + tdd_setup_jenkins_creds() + } + } + + stage('parallel-setup') { + failFast false + parallel { /* parallel-setup */ + + stage('download-files') { + steps { /* download-files */ + tdd_print_debug_info("start") + + copyArtifacts( + projectName: "${JOB_NAME}", + selector: lastCompleted(), + fingerprintArtifacts: true, + optional: true, + ) + + sh("""#!/bin/bash +export PS4='+ [${STAGE_NAME}] \${BASH_SOURCE##*/}:\${LINENO}: ' +set -ex + +rm -f ${env.tftp_initrd} ${env.tftp_kickstart} ${env.tftp_kernel} + +if [[ -n "${params.FEDORA_KICKSTART_URL}" ]]; then + curl --silent --show-error --location ${params.FEDORA_KICKSTART_URL} > ${env.tftp_kickstart} +else + cp jenkins/jobs/distro/fedora/f30-qemu.ks ${env.tftp_kickstart} +fi +curl --silent --show-error --location ${params.FEDORA_INITRD_URL} > ${env.tftp_initrd} +curl --silent --show-error --location ${params.FEDORA_KERNEL_URL} > ${env.tftp_kernel} + +if [[ -f md5sum.txt ]]; then + last="\$(cat md5sum.txt)" +fi + +current=\$(md5sum ${env.tftp_initrd} ${env.tftp_kernel}) + +set +x +echo '------' +echo "last = \n\${last}" +echo "current = \n\${current}" +ls -l ${env.tftp_initrd} ${env.tftp_kernel} +echo '------' +set -x + +if [[ "${params.FORCE}" == 'true' || -z "\${last}" \ + || "\${current}" != "\${last}" ]]; then + echo "${STAGE_NAME}: Need test." + echo "\${current}" > md5sum.txt + echo "yes" > need-test +else + echo "${STAGE_NAME}: No change." 
+ echo "no" > need-test +fi +""") + } + post { /* download-files */ + success { + archiveArtifacts( + artifacts: "md5sum.txt", + fingerprint: true + ) + } + cleanup { + echo "${STAGE_NAME}: done: ${currentBuild.currentResult}" + } + } + } + + stage('build-builder') { + steps { /* build-builder */ + tdd_print_debug_info("start") + tdd_print_result_header() + + echo "${STAGE_NAME}: dockerTag=@${env.dockerTag}@" + + sh("""#!/bin/bash -ex +export PS4='+ [${STAGE_NAME}] \${BASH_SOURCE##*/}:\${LINENO}: ' + +tag=${env.dockerTag} +docker images \${tag%:*} + +[[ "${params.DOCKER_PURGE}" != 'true' ]] || build_args=' --purge' + +./docker/builder/build-builder.sh \${build_args} + +""") + } + post { /* build-builder */ + cleanup { + echo "${STAGE_NAME}: done: ${currentBuild.currentResult}" + } + } + } + } + } + + stage('parallel-test') { + failFast false + parallel { /* parallel-test */ + + stage('remote-tests') { + + when { + expression { return params.RUN_REMOTE_TESTS == true \ + && readFile('need-test').contains('yes') } + } + + stages { /* remote-tests */ + + stage('upload-files') { + steps { + echo "${STAGE_NAME}: start" + tdd_upload_tftp_files('tdd-tftp-login-key', + env.tftp_server, env.tftp_root, + env.tftp_initrd + ' ' + env.tftp_kernel + ' ' + + env.tftp_kickstart) + } + } + + stage('run-remote-tests') { + + agent { /* run-remote-tests */ + docker { + image "${env.dockerTag}" + args "--network host \ + ${env.dockerCredsExtra} \ + ${env.dockerSshExtra} \ + " + reuseNode true + } + } + + environment { /* run-remote-tests */ + TDD_BMC_CREDS = credentials("${test_machine}_bmc_creds") + } + + options { /* run-remote-tests */ + timeout(time: 90, unit: 'MINUTES') + } + + steps { /* run-remote-tests */ + echo "${STAGE_NAME}: start" + tdd_print_debug_info("${STAGE_NAME}") + tdd_print_result_header() + + script { + sh("""#!/bin/bash -ex +export PS4='+ [${STAGE_NAME}] \${BASH_SOURCE##*/}:\${LINENO}: ' + +echo "--------" +printenv | sort +echo "--------" + +echo "${STAGE_NAME}: TODO" +""") + currentBuild.result = 'FAILURE' // FIXME. + } + } + + post { /* run-remote-tests */ + cleanup { + archiveArtifacts( + artifacts: "${STAGE_NAME}-result.txt, ${env.remote_out}", + fingerprint: true) + echo "${STAGE_NAME}: done: ${currentBuild.currentResult}" + } + } + } + } + } + + stage('run-qemu-tests') { + when { + expression { return params.RUN_QEMU_TESTS == true \ + && readFile('need-test').contains('yes') } + } + + agent { /* run-qemu-tests */ + docker { + image "${env.dockerTag}" + args "--network host \ + ${env.dockerCredsExtra} \ + ${env.dockerSshExtra} \ + " + reuseNode true + } + } + + options { /* run-qemu-tests */ + timeout(time: 90, unit: 'MINUTES') + } + + steps { /* run-qemu-tests */ + tdd_print_debug_info("start") + tdd_print_result_header() + + sh("""#!/bin/bash +export PS4='+ [${STAGE_NAME}] \${BASH_SOURCE##*/}:\${LINENO}: ' +set -ex + +rm -f ${env.qemu_out} +touch ${env.qemu_out} + +rm -f fedora.hda +qemu-img create -f qcow2 fedora.hda 20G + +rm -f test-login-key +ssh-keygen -q -f test-login-key -N '' + +scripts/run-fedora-qemu-tests.sh \ + --arch=${params.TARGET_ARCH} \ + --initrd=${env.tftp_initrd} \ + --kernel=${env.tftp_kernel} \ + --kickstart=${env.tftp_kickstart} \ + --out-file=${env.qemu_out} \ + --hda=fedora.hda \ + --ssh-key=test-login-key \ + --verbose + +""") + } + + post { /* run-qemu-tests */ + success { + script { + if (readFile("${env.qemu_out}").contains('reboot: Power down')) { + echo "${STAGE_NAME}: FOUND 'reboot' message." 
+ } else { + echo "${STAGE_NAME}: DID NOT FIND 'reboot' message." + currentBuild.result = 'FAILURE' + } + } + } + cleanup { + archiveArtifacts( + artifacts: "${STAGE_NAME}-result.txt, ${env.qemu_out}", + fingerprint: true) + echo "${STAGE_NAME}: done: ${currentBuild.currentResult}" + } + } + } + } + } + } +} + +void tdd_setup_jenkins_creds() { + sh("""#!/bin/bash -ex +export PS4='+ [${STAGE_NAME}] \${BASH_SOURCE##*/}:\${LINENO}: ' + +sudo mkdir -p ${env.jenkinsCredsPath} +sudo chown \$(id --user --real --name): ${env.jenkinsCredsPath}/ +sudo cp -avf /etc/group /etc/passwd /etc/shadow /etc/sudoers.d ${env.jenkinsCredsPath}/ +""") +} + +void tdd_print_debug_info(String info) { + sh("""#!/bin/bash -ex +echo '${STAGE_NAME}: ${info}' +whoami +id +sudo true +""") +} + +void tdd_print_result_header() { + sh("""#!/bin/bash -ex + +echo "node=${NODE_NAME}" > ${STAGE_NAME}-result.txt +echo "--------" >> ${STAGE_NAME}-result.txt +echo "printenv" >> ${STAGE_NAME}-result.txt +echo "--------" >> ${STAGE_NAME}-result.txt +printenv | sort >> ${STAGE_NAME}-result.txt +echo "--------" >> ${STAGE_NAME}-result.txt +""") +} + + +void tdd_upload_tftp_files(String keyId, String server, String root, String files) { + echo 'upload_tftp_files: key = @' + keyId + '@' + echo 'upload_tftp_files: root = @' + root + '@' + echo 'upload_tftp_files: files = @' + files + '@' + + sshagent (credentials: [keyId]) { + sh("""#!/bin/bash -ex + +ssh ${server} ls -lh ${root} +for f in "${files}"; do + scp \${f} ${server}:${root}/\${f} +done +ssh ${server} ls -lh ${root} +""") + } +} diff --git a/jenkins/jobs/distro/fedora/f30-qemu.ks b/jenkins/jobs/distro/fedora/f30-qemu.ks new file mode 100644 index 00000000..1163c30d --- /dev/null +++ b/jenkins/jobs/distro/fedora/f30-qemu.ks @@ -0,0 +1,61 @@ +#version=DEVEL +ignoredisk --only-use=vda +# System bootloader configuration +bootloader --location=mbr --boot-drive=vda --append "text" +autopart --type=plain +# Partition clearing information +clearpart --drives=vda --all +# Use text mode install +text +# Use network installation +url --url="https://dl.fedoraproject.org/pub/fedora/linux/releases/30/Server/aarch64/os" +# Keyboard layouts +keyboard --vckeymap=us --xlayouts='' +# System language +lang en_US.UTF-8 + +# Network information +network --bootproto=dhcp --device=eth0 --ipv6=auto --activate +network --hostname=f30-install-test +# Root password +rootpw --plaintext r +# Run the Setup Agent on first boot +firstboot --disable +# Do not configure the X Window System +skipx +# System services +services --enabled="chronyd" +# System timezone +timezone America/Los_Angeles --isUtc +user --groups=sudo,docker,wheel --name=tdd-tester + +firewall --disabled + +poweroff + +%packages +@^server-product-environment + +%end + +#%addon com_redhat_kdump --disable --reserve-mb='128' +# +#%end + +%anaconda +pwpolicy root --minlen=6 --minquality=1 --notstrict --nochanges --notempty +pwpolicy user --minlen=6 --minquality=1 --notstrict --nochanges --emptyok +pwpolicy luks --minlen=6 --minquality=1 --notstrict --nochanges --notempty +%end + +%post + +# Setup ssh keys +mkdir -p -m0700 /root/.ssh/ +cat >> /root/.ssh/authorized_keys << EOF +@@ssh-keys@@ +EOF +chmod 0600 /root/.ssh/authorized_keys +restorecon -R /root/.ssh/ + +%end diff --git a/jenkins/jobs/kernel/kernel-test-matrix.groovy b/jenkins/jobs/kernel/kernel-test-matrix.groovy new file mode 100644 index 00000000..19eaff24 --- /dev/null +++ b/jenkins/jobs/kernel/kernel-test-matrix.groovy @@ -0,0 +1,83 @@ +#!groovy +// Runs tests on a Linux kernel git 
repository.
+
+properties([
+    buildDiscarder(logRotator(daysToKeepStr: '30', numToKeepStr: '5')),
+
+    parameters([
+        booleanParam(name: 'KERNEL_DEBUG',
+            defaultValue: false,
+            description: 'Run kernel with debug flags.'),
+        string(name: 'KERNEL_GIT_BRANCH',
+            defaultValue: 'master',
+            description: 'Repository branch of KERNEL_GIT_URL.'),
+        string(name: 'KERNEL_GIT_URL',
+            defaultValue: 'https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git',
+            description: 'URL of a Linux kernel git repository.'),
+        string(name: 'NODE_ARCH_LIST',
+            //defaultValue: 'amd64', // FIXME: For test only!!!
+            defaultValue: 'amd64 arm64',
+            description: 'List of Jenkins node architectures to build on.'),
+        string(name: 'TARGET_ARCH_LIST',
+            defaultValue: 'arm64', // FIXME: Need to setup amd64.
+            description: 'List of target architectures to build for.'),
+        booleanParam(name: 'USE_IMAGE_CACHE',
+            defaultValue: false,
+            description: 'Use cached disk image.'),
+        booleanParam(name: 'USE_KERNEL_CACHE',
+            defaultValue: false,
+            description: 'Use cached kernel image.'),
+        string(name: 'PIPELINE_BRANCH',
+            defaultValue: 'master',
+            description: 'Branch to use for fetching the pipeline jobs')
+    ])
+])
+
+def map_entry = { Boolean _kernel_debug, String _kernel_git_branch,
+    String _kernel_git_url, String _node_arch, String _target_arch,
+    Boolean _use_image_cache, Boolean _use_kernel_cache
+    ->
+    Boolean kernel_debug = _kernel_debug
+    String kernel_git_branch = _kernel_git_branch
+    String kernel_git_url = _kernel_git_url
+    String node_arch = _node_arch
+    String target_arch = _target_arch
+    Boolean use_image_cache = _use_image_cache
+    Boolean use_kernel_cache = _use_kernel_cache
+
+    echo "${JOB_BASE_NAME}: Scheduling ${node_arch}-${target_arch}"
+
+    // Timeout if no node_arch node is available.
+    timeout(time: 45, unit: 'MINUTES') {
+        build(job: 'kernel-test',
+            parameters: [
+                booleanParam(name: 'KERNEL_DEBUG', value: kernel_debug),
+                string(name: 'KERNEL_GIT_BRANCH', value: kernel_git_branch),
+                string(name: 'KERNEL_GIT_URL', value: kernel_git_url),
+                string(name: 'NODE_ARCH', value: node_arch),
+                string(name: 'TARGET_ARCH', value: target_arch),
+                booleanParam(name: 'USE_IMAGE_CACHE', value: use_image_cache),
+                booleanParam(name: 'USE_KERNEL_CACHE', value: use_kernel_cache),
+                string(name: 'PIPELINE_BRANCH', value: params.PIPELINE_BRANCH)
+            ]
+        )
+    }
+}
+
+def build_map = [:]
+build_map.failFast = false
+
+for (node_arch in params.NODE_ARCH_LIST.split()) {
+    for (target_arch in params.TARGET_ARCH_LIST.split()) {
+        build_map["${node_arch}-${target_arch}"] = map_entry.curry(
+            params.KERNEL_DEBUG,
+            params.KERNEL_GIT_BRANCH,
+            params.KERNEL_GIT_URL,
+            node_arch, target_arch,
+            params.USE_IMAGE_CACHE, params.USE_KERNEL_CACHE)
+    }
+}
+
+stage('Downstream') {
+    parallel build_map
+}
diff --git a/jenkins/jobs/kernel/kernel-test.groovy b/jenkins/jobs/kernel/kernel-test.groovy
new file mode 100644
index 00000000..06bc4a13
--- /dev/null
+++ b/jenkins/jobs/kernel/kernel-test.groovy
@@ -0,0 +1,721 @@
+#!groovy
+// Runs tests on a Linux kernel git repository.
+//
+// The `jenkins` user must be in the `docker` user group.
+// Requires nodes with labels: `amd64`, `arm64`, `docker`.
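+//
+// As a sketch only (node preparation is not handled by this job), docker
+// group membership can be granted on an agent with something like:
+//
+//   sudo usermod -aG docker jenkins
+//   # then restart the agent (or re-login) so the new group takes effect
+//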
+ +script { + library identifier: "tdd-project@master", retriever: legacySCM(scm) +} + +def stagingTokenBootstrap = [:] +boolean cacheFoundBootstrap = false +boolean cacheFoundKernel = false +boolean cacheFoundImage = false + +pipeline { + parameters { + booleanParam(name: 'DOCKER_PURGE', + defaultValue: false, + description: 'Remove existing tdd-builder image and rebuild.') + + string(name: 'KERNEL_CONFIG_URL', + defaultValue: '', + description: 'URL of an alternate kernel config.') + + string(name: 'KERNEL_GIT_BRANCH', + defaultValue: 'master', + description: 'Branch or tag of KERNEL_GIT_URL.') + + string(name: 'KERNEL_GIT_URL', + defaultValue: 'https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git', + description: 'URL of a Linux kernel git repository.') + + choice(name: 'NODE_ARCH', + choices: "amd64\narm64", + description: 'Jenkins node architecture to build on.') + + choice(name: 'ROOTFS_TYPE', + choices: "debian\nalpine\nfedora", + description: 'Root file system type to build.') + + booleanParam(name: 'SYSTEMD_DEBUG', + defaultValue: false, + description: 'Run kernel with systemd debug flags.') + + choice(name: 'TARGET_ARCH', + choices: "arm64\namd64\nppc64le", + description: 'Target architecture to build for.') + + choice(name: 'TARGET_MACHINE', + choices: "qemu\ngbt2s18\ngbt2s19\nsaber25\nt88", + description: 'Target machine to run tests on.') + + choice(name: 'TEST_NAME', + choices: "ltp\nunixbench\nkselftest\nhttp-wrk\nnone", + description: 'Test to run on target machine.') + + string(name: 'PIPELINE_BRANCH', + defaultValue: 'master', + description: 'Branch to use for fetching the pipeline jobs') + + // Job debugging parameters. + choice(name: 'AGENT', + choices: "master\nlab2\nsaber25\ntdd2\ntdd3", + description: '[debugging] Which Jenkins agent to use.') + + booleanParam(name: 'USE_BOOTSTRAP_CACHE', + defaultValue: true, + description: '[debugging] Use cached rootfs bootstrap image.') + + booleanParam(name: 'USE_IMAGE_CACHE', + defaultValue: false, + description: '[debugging] Use cached rootfs disk image.') + + booleanParam(name: 'USE_KERNEL_CACHE', + defaultValue: false, + description: '[debugging] Use cached kernel build.') + + } + + options { + // Timeout if no node available. 
+ timeout(time: 90, unit: 'MINUTES') + //timestamps() + buildDiscarder(logRotator(daysToKeepStr: '30', numToKeepStr: '10')) + } + + environment { + String topBuildDir = 'build' + String scriptsDir = 'scripts' + + String bootstrapPrefix="${env.topBuildDir}/${params.TARGET_ARCH}-${params.ROOTFS_TYPE}" + String outputPrefix="${env.bootstrapPrefix}-${params.TEST_NAME}" + + String bootstrapDir="${env.bootstrapPrefix}.bootstrap" + String imageDir="${env.outputPrefix}.image" + String testsDir="${env.outputPrefix}.tests" + String resultsDir="${env.outputPrefix}.results" + + String kernelSrcDir = sh( + returnStdout: true, + script: "set -x; \ +echo -n '${env.topBuildDir}/'; \ +echo '${params.KERNEL_GIT_URL}' | sed 's|://|-|; s|/|-|g'").trim() + String kernelBuildDir = "${env.topBuildDir}/${params.TARGET_ARCH}-kernel-build" + String kernelInstallDir = "${env.topBuildDir}/${params.TARGET_ARCH}-kernel-install" + + //String qemu_out = "${env.topBuildDir}/qemu-console.txt" + //String remote_out = "${env.topBuildDir}/${params.TEST_MACHINE}-console.txt" + + String tddStorePath = sh( + returnStdout: true, + script: "set -x; \ +if [ \${TDD_STORE} ]; then \ + echo -n \${TDD_STORE}; \ +else \ + echo -n /run/tdd-store/\${USER}; \ +fi") + String jenkinsCredsPath = "${env.tddStorePath}/jenkins_creds" + String dockerCredsExtra = "-v ${env.jenkinsCredsPath}/group:/etc/group:ro \ + -v ${env.jenkinsCredsPath}/passwd:/etc/passwd:ro \ + -v ${env.jenkinsCredsPath}/shadow:/etc/shadow:ro \ + -v ${env.jenkinsCredsPath}/sudoers.d:/etc/sudoers.d:ro" + String dockerTag = sh( + returnStdout: true, + script: './docker/builder/build-builder.sh --tag').trim() + + // Job debugging variables. + String fileStagingDir = 'staging' + String fileCacheDir = "${env.WORKSPACE}/../${env.JOB_BASE_NAME}--file-cache" + } + + agent { label "${params.AGENT}" } + + stages { + + stage('setup') { + steps { /* setup */ + clean_disk_image_build() + tdd_setup_file_cache() + tdd_setup_jenkins_creds() + sh("mkdir -p ${env.resultsDir}") + //cache_test() + echo "@${params.TEST_NAME}@" + } + } + + stage('build-builder') { + environment { /* build-builder */ + resultFile = "${env.resultsDir}/${STAGE_NAME}-result.txt" + } + steps { /* build-builder */ + echo "${STAGE_NAME}: dockerTag=@${env.dockerTag}@" + + tdd_print_debug_info("${STAGE_NAME}") + tdd_print_result_header(env.resultFile) + + sh("""#!/bin/bash -ex +export PS4='+ [${STAGE_NAME}] \${BASH_SOURCE##*/}:\${LINENO}: ' + +tag=${env.dockerTag} +docker images \${tag%:*} + +if [[ "${params.DOCKER_PURGE}" == 'true' ]]; then + builder_args+=' --purge' +fi + +./docker/builder/build-builder.sh \${builder_args} + +""") + } + post { /* build-builder */ + cleanup { + echo "${STAGE_NAME}: cleanup: ${currentBuild.currentResult} -> ${currentBuild.result}" + } + } + } + + stage('parallel-build') { + failFast false + parallel { /* parallel-build */ + + stage('build-kernel') { + environment { /* build-kernel */ + resultFile = "${env.resultsDir}/${STAGE_NAME}-result.txt" + } + + agent { /* build-kernel */ + docker { + image "${env.dockerTag}" + args "--network host \ + ${env.dockerCredsExtra} \ + " + reuseNode true + } + } + + steps { /* build-kernel */ + tdd_print_debug_info("${STAGE_NAME}") + tdd_print_result_header(env.resultFile) + + dir(env.kernelSrcDir) { + checkout scm: [ + $class: 'GitSCM', + branches: [[name: params.KERNEL_GIT_BRANCH]], + userRemoteConfigs: [[url: params.KERNEL_GIT_URL]], + ] + sh("git show -q") + } + + script { + if (params.USE_KERNEL_CACHE) { + cacheFoundKernel = newFileCache.get( + 
env.fileCacheDir, env.kernelInstallDir) + if (cacheFoundKernel) { + currentBuild.result = 'SUCCESS' + echo "${STAGE_NAME}: Using cached files." + return + } + } + + sh("""#!/bin/bash -ex +export PS4='+ [${STAGE_NAME}] \${BASH_SOURCE##*/}:\${LINENO}: ' + +src_dir="\$(pwd)/${env.kernelSrcDir}" +build_dir="\$(pwd)/${env.kernelBuildDir}" +install_dir="\$(pwd)/${env.kernelInstallDir}" + +rm -rf \${build_dir} "\${install_dir}" + +${env.scriptsDir}/build-linux-kernel.sh \ + --build-dir=\${build_dir} \ + --install-dir=\${install_dir} \ + ${params.TARGET_ARCH} \${src_dir} defconfig + +if [[ -n "${params.KERNEL_CONFIG_URL}" ]]; then + curl --silent --show-error --location ${params.KERNEL_CONFIG_URL} \ + > \${build_dir}/.config +else + ${env.scriptsDir}/set-config-opts.sh \ + --verbose \ + ${env.scriptsDir}/tx2-fixup.spec \${build_dir}/.config +fi + +${env.scriptsDir}/build-linux-kernel.sh \ + --build-dir=\${build_dir} \ + --install-dir=\${install_dir} \ + ${params.TARGET_ARCH} \${src_dir} fresh + +cp -vf \${install_dir}/boot/config \${install_dir}/boot/kernel-config +rm -rf \${build_dir} +""") + } + } + + post { /* build-kernel */ + success { + archiveArtifacts( + artifacts: "${env.resultFile}, ${env.kernelInstallDir}/boot/kernel-config", + fingerprint: true) + } + cleanup { + echo "${STAGE_NAME}: cleanup: ${currentBuild.currentResult} -> ${currentBuild.result}" + } + } + } + + stage('bootstrap-disk-image') { + environment { /* bootstrap-disk-image */ + resultFile = "${env.resultsDir}/${STAGE_NAME}-result.txt" + } + + agent { /* bootstrap-disk-image */ + docker { + image "${env.dockerTag}" + args "--network host \ + --privileged \ + ${env.dockerCredsExtra} \ + " + reuseNode true + } + } + + steps { /* bootstrap-disk-image */ + tdd_print_debug_info("${STAGE_NAME}") + tdd_print_result_header(env.resultFile) + + echo "${STAGE_NAME}: params.USE_BOOTSTRAP_CACHE=${params.USE_BOOTSTRAP_CACHE}" + + script { + if (params.USE_BOOTSTRAP_CACHE) { + cacheFoundBootstrap = newFileCache.get( + env.fileCacheDir, env.bootstrapDir) + if (cacheFoundBootstrap) { + currentBuild.result = 'SUCCESS' + echo "${STAGE_NAME}: Using cached files." 
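+                            // Nothing to rebuild; the build-disk-image stage consumes the
+                            // cached bootstrap via --bootstrap-src.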
+ return + } + } + + echo "${STAGE_NAME}: dockerCredsExtra = @${env.dockerCredsExtra}@" + + sh("""#!/bin/bash -ex +export PS4='+ [${STAGE_NAME}] \${BASH_SOURCE##*/}:\${LINENO}: ' + +# for debug +id +whoami +#cat /etc/group || : +#ls -l /etc/sudoers || : +#ls -l /etc/sudoers.d || : +#cat /etc/sudoers || : +sudo -S true + +${env.scriptsDir}/build-rootfs.sh \ + --arch=${params.TARGET_ARCH} \ + --output-dir=${env.bootstrapDir} \ + --rootfs-type=${params.ROOTFS_TYPE} \ + --bootstrap \ + --verbose +""") + } + } + + post { /* bootstrap-disk-image */ + success { + script { + if (params.USE_BOOTSTRAP_CACHE + && !cacheFoundBootstrap) { + stagingTokenBootstrap = newFileCache.stage( + env.fileStagingDir, env.fileCacheDir, + env.bootstrapDir) + } + } + archiveArtifacts( + artifacts: "${env.resultFile}", + fingerprint: true) + } + cleanup { + echo "${STAGE_NAME}: cleanup: ${currentBuild.currentResult} -> ${currentBuild.result}" + } + } + } + + } + + post { /* parallel-build */ + failure { + clean_disk_image_build() + } + cleanup { + echo "${STAGE_NAME}: cleanup: ${currentBuild.currentResult} -> ${currentBuild.result}" + } + } + } + + stage('cache-bootstrap') { + when { /* cache-bootstrap */ + expression { return params.USE_BOOTSTRAP_CACHE } + } + steps { /* cache-bootstrap */ + tdd_print_debug_info("${STAGE_NAME}") + script { + if (stagingTokenBootstrap.isEmpty()) { + echo "${STAGE_NAME}: Bootstrap already cached." + } else { + newFileCache.commit(stagingTokenBootstrap, '**bootstrap stamp info**') + } + } + } + } + + stage('cache-kernel') { + when { /* cache-kernel */ + expression { return params.USE_KERNEL_CACHE } + } + steps { /* cache-kernel */ + tdd_print_debug_info("${STAGE_NAME}") + script { + if (cacheFoundKernel) { + echo "${STAGE_NAME}: Kernel already cached." + } else { + newFileCache.put(env.fileStagingDir, env.fileCacheDir, + env.kernelInstallDir, '**kernel stamp info**') + } + } + } + } + + stage('build-disk-image') { + environment { /* build-disk-image */ + resultFile = "${env.resultsDir}/${STAGE_NAME}-result.txt" + } + agent { /* build-disk-image */ + docker { + image "${env.dockerTag}" + args "--network host \ + --privileged \ + ${env.dockerCredsExtra} \ + " + reuseNode true + } + } + + steps { /* build-disk-image */ + tdd_print_debug_info("${STAGE_NAME}") + tdd_print_result_header(env.resultFile) + script { + if (params.USE_IMAGE_CACHE) { + if (newFileCache.get(env.fileCacheDir, + env.imageDir + '/initrd') == true + && newFileCache.get(env.fileCacheDir, + env.imageDir + '/manifest') == true + && newFileCache.get(env.fileCacheDir, + env.imageDir + '/login-key') == true) { + cacheFoundImage = true + echo "${STAGE_NAME}: Using cached files." 
+ currentBuild.result = 'SUCCESS' + return + } + } + + sh("""#!/bin/bash -ex +export PS4='+ [${STAGE_NAME}] \${BASH_SOURCE##*/}:\${LINENO}: ' + +if [[ "${params.TEST_NAME}" != 'none' ]]; then + source ${env.scriptsDir}/test-plugin/${params.TEST_NAME}.sh + test_name="${params.TEST_NAME}" + extra_packages+="\$(test_packages_\${test_name//-/_} ${params.ROOTFS_TYPE})" +fi + +modules_dir="\$(find ${env.kernelInstallDir}/lib/modules/* -maxdepth 0 -type d)" + +${env.scriptsDir}/build-rootfs.sh \ + --arch=${params.TARGET_ARCH} \ + --output-dir=${env.imageDir} \ + --rootfs-type=${params.ROOTFS_TYPE} \ + --bootstrap-src=${env.bootstrapDir} \ + --kernel-modules=\${modules_dir} \ + --extra-packages="\${extra_packages}" \ + --rootfs-setup \ + --make-image \ + --verbose + + test_setup_\${test_name//-/_} ${params.ROOTFS_TYPE} ${env.imageDir}/rootfs +""") + } + } + + post { /* build-disk-image */ + success { + archiveArtifacts( + artifacts: "${env.resultFile}, ${env.rootfs_prefix}.manifest", + fingerprint: true) + } + failure { + clean_disk_image_build() + echo "${STAGE_NAME}: ${currentBuild.currentResult}" + } + cleanup { + echo "${STAGE_NAME}: cleanup: ${currentBuild.currentResult} -> ${currentBuild.result}" + } + } + } + + stage('cache-image') { + when { /* cache-image */ + expression { return params.USE_IMAGE_CACHE } + } + steps { /* cache-image */ + tdd_print_debug_info("${STAGE_NAME}") + script { + if (cacheFoundImage) { + echo "${STAGE_NAME}: Image already cached." + } else { + newFileCache.put(env.fileStagingDir, env.fileCacheDir, + env.imageDir + '/initrd', '**initrd stamp info**') + newFileCache.put(env.fileStagingDir, env.fileCacheDir, + env.imageDir + '/login-key', '**login-key stamp info**') + newFileCache.put(env.fileStagingDir, env.fileCacheDir, + env.imageDir + '/manifest', '**manifest stamp info**') + } + } + } + } + + stage('build-test') { + when { /* build-test */ + expression { return (params.TEST_NAME != 'none' && !params.USE_IMAGE_CACHE) } + } + + environment { /* build-test */ + resultFile = "${env.resultsDir}/${STAGE_NAME}-result.txt" + } + + agent { /* build-test */ + docker { + image "${env.dockerTag}" + args "--network host \ + ${env.dockerCredsExtra} \ + " + reuseNode true + } + } + + steps { /* build-test */ + tdd_print_debug_info("${STAGE_NAME}") + tdd_print_result_header(env.resultFile) + + sh("""#!/bin/bash -ex +export PS4='+ [${STAGE_NAME}] \${BASH_SOURCE##*/}:\${LINENO}: ' + +source ${env.scriptsDir}/test-plugin/${params.TEST_NAME}.sh + +test_name="${params.TEST_NAME}" +test_build_\${test_name//-/_} ${env.testsDir} "${env.imageDir}/rootfs" ${env.kernelSrcDir} + +""") + } + + post { /* run-test */ + cleanup { + echo "${STAGE_NAME}: cleanup: ${currentBuild.currentResult} -> ${currentBuild.result}" + archiveArtifacts( + artifacts: "${env.resultFile}", + fingerprint: true) + } + } + } + + stage('run-test') { + when { /* run-test */ + expression { return !(params.TEST_NAME == 'none') } + } + + environment { /* run-test */ + resultFile = "${env.resultsDir}/${STAGE_NAME}-result.txt" + outFile = "${env.resultsDir}/${params.TARGET_MACHINE}-console.txt" + TDD_BMC_CREDS = credentials("${params.TARGET_MACHINE}_bmc_creds") + } + + agent { /* run-test */ + docker { + image "${env.dockerTag}" + args "--network host \ + ${env.dockerCredsExtra} \ + " + reuseNode true + } + } + + steps { /* run-test */ + tdd_print_debug_info("${STAGE_NAME}") + tdd_print_result_header(env.resultFile) + + script { + switch (params.TARGET_MACHINE) { + case 'qemu': + sh("""#!/bin/bash -ex +export PS4='+ 
[${STAGE_NAME}] \${BASH_SOURCE##*/}:\${LINENO}: ' + +if [[ ${params.SYSTEMD_DEBUG} ]]; then + extra_args="--systemd-debug" +fi + +bash -x ${env.scriptsDir}/run-kernel-qemu-tests.sh \ + --kernel=${env.kernelInstallDir}/boot/Image \ + --initrd=${env.imageDir}/initrd \ + --ssh-login-key=${env.imageDir}/login-key \ + --test-name=${params.TEST_NAME} \ + --tests-dir=${env.testsDir} \ + --out-file=${env.outFile} \ + --result-file=${env.resultFile} \ + --arch=${params.TARGET_ARCH} \ + \${extra_args} \ + --verbose +""") + break + default: + sshagent (credentials: ['tdd-tftp-login-key']) { + sh("""#!/bin/bash -ex +export PS4='+ [${STAGE_NAME}] \${BASH_SOURCE##*/}:\${LINENO}: ' + +if [[ ${params.SYSTEMD_DEBUG} ]]; then + extra_args="--systemd-debug" +fi + +bash -x ${env.scriptsDir}/run-kernel-remote-tests.sh \ + --kernel=${env.kernelInstallDir}/boot/Image \ + --initrd=${env.imageDir}/initrd \ + --ssh-login-key=${env.imageDir}/login-key \ + --test-name=${params.TEST_NAME} \ + --tests-dir=${env.testsDir} \ + --out-file=${env.outFile} \ + --result-file=${env.resultFile} \ + --test-machine=${params.TARGET_MACHINE} \ + \${extra_args} \ + --verbose +""") + } + break + } + } + } + + post { /* run-test */ + success { + script { + if (readFile("${env.outFile}").contains('reboot: Power down')) { + echo "${STAGE_NAME}: FOUND 'reboot' message." + } else { + echo "${STAGE_NAME}: DID NOT FIND 'reboot' message." + currentBuild.result = 'FAILURE' + } + } + } + cleanup { + echo "${STAGE_NAME}: cleanup: ${currentBuild.currentResult} -> ${currentBuild.result}" + archiveArtifacts( + artifacts: "${env.resultFile}, ${env.outFile}", + fingerprint: true) + } + } + } + + } +} + +void tdd_setup_jenkins_creds() { + sh("""#!/bin/bash -ex +export PS4='+ [tdd_setup_jenkins_creds] \${BASH_SOURCE##*/}:\${LINENO}: ' + +sudo rm -rf ${env.jenkinsCredsPath} +sudo mkdir -p ${env.jenkinsCredsPath} +sudo chown \$(id --user --real --name): ${env.jenkinsCredsPath}/ +sudo cp -avf /etc/group ${env.jenkinsCredsPath}/ +sudo cp -avf /etc/passwd ${env.jenkinsCredsPath}/ +sudo cp -avf /etc/shadow ${env.jenkinsCredsPath}/ +sudo cp -avf /etc/sudoers.d ${env.jenkinsCredsPath}/ +""") +} + +void tdd_print_debug_info(String stage_name) { + sh("""#!/bin/bash -ex +echo 'In ${stage_name}:' +whoami +id +""") +} + +void tdd_print_result_header(String resultFile) { + sh("""#!/bin/bash -ex + +echo "node=${NODE_NAME}" > ${resultFile} +echo "--------" >> ${resultFile} +echo "printenv" >> ${resultFile} +echo "--------" >> ${resultFile} +printenv | sort >> ${resultFile} +echo "--------" >> ${resultFile} +""") +} + +void clean_disk_image_build() { + echo "cleaning disk-image" + sh("sudo rm -rf ${env.topBuildDir}/*.rootfs ${env.topBuildDir}/*.bootstrap") +} + +void tdd_setup_file_cache() { + sh("""#!/bin/bash -ex +mkdir -p ${env.fileCacheDir} +""") +} + +void cache_test() { + script { + sh("""#!/bin/bash -ex +mkdir -p ${env.topBuildDir}/c-test +echo "aaa" > ${env.topBuildDir}/c-test/aaa +echo "bbb" > ${env.topBuildDir}/c-test/bbb +""") + def token = newFileCache.stage( + env.fileStagingDir, env.fileCacheDir, + "${env.topBuildDir}/c-test") + + if (!token.isEmpty()) { + newFileCache.commit(token, "TEST Commit 1") + } else { + echo "c-test not staged 1." + } + + token = newFileCache.stage( + env.fileStagingDir, env.fileCacheDir, + "${env.topBuildDir}/c-test") + + if (!token.isEmpty()) { + newFileCache.commit(token, "TEST Commit 2") + } else { + echo "c-test not staged 2." 
+ } + + sh("""#!/bin/bash -ex +mkdir -p ${env.topBuildDir}/c-test +echo "ccc" > ${env.topBuildDir}/c-test/ccc +""") + + newFileCache.put( + env.fileStagingDir, env.fileCacheDir, + "${env.topBuildDir}/c-test", "TEST Put 3") + + newFileCache.get(env.fileCacheDir, + "${env.topBuildDir}/c-test", + "${env.topBuildDir}/c-test-out") + + sh("""#!/bin/bash -ex +find "${env.topBuildDir}/c-test-out" +""") + + newFileCache.get(env.fileCacheDir, + "${env.topBuildDir}/c-test") + } +} diff --git a/jenkins/jobs/kernel/linux-4.19.y-stable-trigger.groovy b/jenkins/jobs/kernel/linux-4.19.y-stable-trigger.groovy new file mode 100644 index 00000000..800f9e27 --- /dev/null +++ b/jenkins/jobs/kernel/linux-4.19.y-stable-trigger.groovy @@ -0,0 +1,12 @@ +#!groovy +// Polls linux kernel repo for changes, builds kernel, runs tests. + +script { + library identifier: 'tdd@master', retriever: legacySCM(scm) +} + +kernelTrigger { + git_url = 'https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git' + git_branch = 'linux-4.19.y' + cron_spec = 'H H/6 * * *' // Every 6 Hrs. +} diff --git a/jenkins/jobs/kernel/linux-4.20.y-stable-trigger.groovy b/jenkins/jobs/kernel/linux-4.20.y-stable-trigger.groovy new file mode 100644 index 00000000..9d182532 --- /dev/null +++ b/jenkins/jobs/kernel/linux-4.20.y-stable-trigger.groovy @@ -0,0 +1,12 @@ +#!groovy +// Polls linux kernel repo for changes, builds kernel, runs tests. + +script { + library identifier: 'tdd@master', retriever: legacySCM(scm) +} + +kernelTrigger { + git_url = 'https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git' + git_branch = 'linux-4.20.y' + cron_spec = 'H H/6 * * *' // Every 6 Hrs. +} diff --git a/jenkins/jobs/kernel/linux-mainline-trigger.groovy b/jenkins/jobs/kernel/linux-mainline-trigger.groovy new file mode 100644 index 00000000..eba6c176 --- /dev/null +++ b/jenkins/jobs/kernel/linux-mainline-trigger.groovy @@ -0,0 +1,12 @@ +#!groovy +// Polls linux kernel repo for changes, builds kernel, runs tests. + +script { + library identifier: 'tdd@master', retriever: legacySCM(scm) +} + +kernelTrigger { + git_url = 'https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git' + git_branch = 'master' + cron_spec = '@hourly' +} diff --git a/jenkins/jobs/kernel/linux-next-trigger.groovy b/jenkins/jobs/kernel/linux-next-trigger.groovy new file mode 100644 index 00000000..7a6e9bf1 --- /dev/null +++ b/jenkins/jobs/kernel/linux-next-trigger.groovy @@ -0,0 +1,12 @@ +#!groovy +// Polls linux kernel repo for changes, builds kernel, runs tests. + +script { + library identifier: 'tdd@master', retriever: legacySCM(scm) +} + +kernelTrigger { + git_url = 'https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git' + git_branch = 'master' + cron_spec = '@hourly' +} diff --git a/jenkins/vars/fileCache.groovy b/jenkins/vars/fileCache.groovy new file mode 100644 index 00000000..167cce4c --- /dev/null +++ b/jenkins/vars/fileCache.groovy @@ -0,0 +1,127 @@ +#!groovy +// File caching routines. + +void info() { + echo 'version:1' +} + +boolean stage(String cache_path, String tag, String item, String stamp_info) { +// TODO +} + +boolean commit(String cache_path, String tag) { +// TODO +} + + +void put(String cache_path, String item, String stamp_info) { + +// FIXME: Need to avoid concurrent access by multiple runing jobs. 
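+// A possible approach (sketch only, not implemented here): serialize updates
+// with flock(1) inside the shell step, e.g.
+//   exec 9> "${cache_path}/.lock"; flock 9
+// and take the same lock in get() before reading.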
+ + sh(script: """#!/bin/bash -ex + export PS4='+\${BASH_SOURCE##*/}:\${LINENO}:' + + sum_file=${cache_path}/${item}.sum + stamp_file=${cache_path}/${item}.stamp + tmp_file=${cache_path}/${item}.tmp + + mkdir -p ${cache_path}/${item} + + if [[ -f ${item} ]]; then + cp -f ${item} \${tmp_file} + dest=${cache_path}/${item} + elif [[ -d ${item} ]]; then + sudo tar -cf \${tmp_file} ${item} + dest=${cache_path}/${item}.tar + else + echo "ERROR: Bad item: '${item}" >&2 + exit 1 + fi + + sudo chown \$(id --user --real --name): \${tmp_file} + sum=\$(md5sum \${tmp_file} | cut -d ' ' -f 1) + + if [[ -f \${stamp_file} && -f \${sum_file} \ + && "\${sum}" == "\$(cat \${sum_file})" ]]; then + echo "cache-put: Found '${item}' in ${cache_path}." >&2 + rm -f \${tmp_file} + exit 0 + fi + + rm -f \${stamp_file} + echo "\${sum}" > \${sum_file} + mv -f \${tmp_file} \${dest} + + echo "version:1" > \${stamp_file} + echo "item:${item}" >> \${stamp_file} + echo "date:\$(date)" >> \${stamp_file} + echo "md5sum:\${sum}" >> \${stamp_file} + echo "${stamp_info}" >> \${stamp_file} + + echo "cache-put: Wrote '${item}' to ${cache_path}." >&2 + exit 0 +""") +} + +boolean get(String cache_path, String item, boolean use_flag) { + if (!use_flag) { + echo 'cache-get: use_flag false, ignoring cache.' + return false + } + echo 'cache-get: use_flag true, checking cache.' + + def result = sh(returnStatus: true, + script: """#!/bin/bash -ex + export PS4='+\${BASH_SOURCE##*/}:\${LINENO}:' + + sum_file=${cache_path}/${item}.sum + stamp_file=${cache_path}/${item}.stamp + + if [[ -f ${cache_path}/${item} ]]; then + src=${cache_path}/${item} + elif [[ -f ${cache_path}/${item}.tar ]]; then + have_tar=1 + src=${cache_path}/${item}.tar + else + echo "cache-get: '${item}' not found in ${cache_path}." >&2 + exit 1 + fi + + if [[ ! -f \${stamp_file} ]]; then + echo "cache-get: '\${stamp_file}' not found in ${cache_path}." >&2 + exit 1 + fi + + if [[ ! -f \${sum_file} ]]; then + echo "cache-get: '\${sum_file}' not found in ${cache_path}." >&2 + exit 1 + fi + + if [[ ! -f \${src} ]]; then + echo "cache-get: '\${src}' not found in ${cache_path}." >&2 + exit 1 + fi + + echo "cache-get: Using '${item}' from ${cache_path}." >&2 + + cat "\${stamp_file}" + + rm -rf ${item}.old + + if [[ -e ${item} ]]; then + mv ${item} ${item}.old + fi + + if [[ \${have_tar} ]]; then + sudo tar -xf \${src} + else + cp -af \${src} ${item} + fi + exit 0 +""") + + echo "cache-get result: @${result}@" + + /* Return true if found. */ + return result ? false : true +} diff --git a/jenkins/vars/kernelTrigger.groovy b/jenkins/vars/kernelTrigger.groovy new file mode 100644 index 00000000..e305003d --- /dev/null +++ b/jenkins/vars/kernelTrigger.groovy @@ -0,0 +1,112 @@ +#!groovy + +/* + * kernelTrigger - Polls git repo for changes, runs kernel-test job. + * + * String cron_spec: Default = '@hourly', See + * https://jenkins.io/doc/book/pipeline/syntax/#cron-syntax. + * String git_branch: Default = 'master'. + * String git_url: Required. 
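+ *
+ * Typical usage (matches the trigger jobs under jenkins/jobs/kernel/):
+ *
+ *   kernelTrigger {
+ *       git_url = 'https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git'
+ *       git_branch = 'master'
+ *       cron_spec = '@hourly'
+ *   }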
+ */ + +def call(body) { + def args = [:] + body.resolveStrategy = Closure.DELEGATE_FIRST + body.delegate = args + body() + + args.cron_spec = args.cron_spec ?: '@hourly' + args.git_branch = args.git_branch ?: 'master' + + print "kernelTrigger: args = ${args}" + + pipeline { + parameters { + booleanParam(name: 'FORCE_BUILD', + defaultValue: false, + description: 'Force build and test of kernel.') + string(name: 'PIPELINE_BRANCH', + defaultValue: 'master', + description: 'Branch to use for fetching the pipeline jobs') + } + + options { + buildDiscarder(logRotator(daysToKeepStr: '2', numToKeepStr: '12')) + } + + triggers { + cron(args.cron_spec) + } + + agent { label "master" } + + stages { + stage('poll') { + steps { + echo "${STAGE_NAME}: start" + + copyArtifacts( + projectName: "${JOB_NAME}", + selector: lastCompleted(), + fingerprintArtifacts: true, + optional: true, + ) + + sh("""#!/bin/bash -ex +export PS4='+\${BASH_SOURCE##*/}:\${LINENO}:' + +if [[ -f linux.ref ]]; then + last="\$(cat linux.ref)" +fi + +current=\$(git ls-remote ${args.git_url} ${args.git_branch}) + +set +x +echo '------' +echo "last = @\${last}@" +echo "current = @\${current}@" +echo '------' +set -x + +if [[ "${params.FORCE_BUILD}" == 'true' \ + || -z "\${last}" || "\${current}" != "\${last}" ]]; then + echo "${STAGE_NAME}: Need build." + echo "\${current}" > linux.ref + echo "yes" > need-build +else + echo "${STAGE_NAME}: No change." + echo "no" > need-build +fi +""") + } + post { /* poll */ + success { + archiveArtifacts( + artifacts: "linux.ref", + fingerprint: true + ) + } + } + } + + stage ('downstream') { + when { + expression { return readFile('need-build').contains('yes') } + } + steps { + echo "${STAGE_NAME}: start" + + build( + job: 'kernel-test', + parameters: [ + string(name: 'KERNEL_GIT_BRANCH', value: args.git_branch), + string(name: 'KERNEL_GIT_URL', value: args.git_url), + booleanParam(name: 'USE_KERNEL_CACHE', value: false), + string(name: 'PIPELINE_BRANCH', value: params.PIPELINE_BRANCH) + ], + ) + } + } + } + } +} diff --git a/jenkins/vars/newFileCache.groovy b/jenkins/vars/newFileCache.groovy new file mode 100644 index 00000000..7bb3e7ef --- /dev/null +++ b/jenkins/vars/newFileCache.groovy @@ -0,0 +1,179 @@ +#!groovy +// File caching routines. + +void info() { + echo 'version:2' +} + +Map stage(String fileStagingDir, String fileCacheDir, String inPath) { + String stage = "${fileStagingDir}/${inPath}" + String cache = "${fileCacheDir}/${inPath}" + + String stageData = "${stage}/data" + String stageStamp = "${stage}/stamp" + String stageSum = "${stage}/sum" + String stageType = "${stage}/type" + String cacheData = "${cache}/data" + String cacheStamp = "${cache}/stamp" + String cacheSum = "${cache}/sum" + String cacheType = "${cache}/type" + + //echo 'fileCache.stage: stage: ' + stage + //echo 'fileCache.stage: cache: ' + cache + //echo 'fileCache.stage: inPath: ' + inPath + + def result = sh(returnStatus: true, + script: """#!/bin/bash -ex +export PS4='+fileCache.stage (script):\${LINENO}: ' + +if [[ -e ${stage} ]]; then + echo "fileCache.stage: ERROR: Stage exists: '${stage}'" >&2 + exit 2 +fi + +if [[ -f ${inPath} ]]; then + mkdir -p ${stage} + cp -f ${inPath} ${stageData} + echo "file" > ${stageType} +elif [[ -d ${inPath} ]]; then + mkdir -p ${stage} + sudo tar -C ${inPath} -cf ${stageData} . 
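+ # A directory is staged as a single tarball and a plain file is copied as-is;
+ # the 'type' marker written below records which form was used, so that get()
+ # can restore the cached entry the same way.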
+ echo "tar" > ${stageType} +else + echo "fileCache.stage: ERROR: Bad inPath: '${inPath}'" >&2 + exit 2 +fi + +sudo chown \$(id --user --real --name): ${stageData} +sum=\$(md5sum ${stageData} | cut -d ' ' -f 1) + +if [[ -f ${cacheStamp} && -f ${cacheSum} \ + && "\${sum}" == "\$(cat ${cacheSum})" ]]; then + rm -rf ${stage} + echo "fileCache.stage: Found '${inPath}' in '${fileCacheDir}'." >&2 + exit 1 +fi + +echo "\${sum}" > ${stageSum} + +echo "version:2" > ${stageStamp} +echo "item:${inPath}" >> ${stageStamp} +echo "date:\$(date)" >> ${stageStamp} +echo "md5sum:\${sum}" >> ${stageStamp} + +echo "fileCache.stage: Wrote '${inPath}' to '${stage}'." >&2 +exit 0 + +""") + + echo "fileCache.stage result: @${result}@" + + return result ? [:] : [stage_dir:"${stage}", cache_dir:"${cache}"] +} + +void commit(Map token, String stampInfo) { + String stage = token.stage_dir + String cache = token.cache_dir + + String stageStamp = "${stage}/stamp" + + //echo 'fileCache.stage: stage: ' + stage + //echo 'fileCache.stage: cache: ' + cache + //echo 'fileCache.commit: stampInfo: ' + stampInfo + + sh("""#!/bin/bash -ex +export PS4='+fileCache.commit (script):\${LINENO}: ' +echo "${stampInfo}" >> ${stageStamp} +mkdir -p ${cache} +rsync -av --delete ${stage}/ ${cache}/ +rm -rf ${stage} +""") +} + +void put(String fileStagingDir, String fileCacheDir, String inPath, + String stampInfo) { + Map token = stage(fileStagingDir, fileCacheDir, inPath) + if (! token.isEmpty()) { + commit(token, stampInfo) + } +} + +boolean get(String fileCacheDir, String inPath, String outPath = '') { + String cache = "${fileCacheDir}/${inPath}" + + String cacheData = "${cache}/data" + String cacheStamp = "${cache}/stamp" + String cacheSum = "${cache}/sum" + String cacheType = "${cache}/type" + + if (outPath.isEmpty()) { + outPath = inPath + } + + echo 'fileCache.get: inPath: ' + inPath + echo 'fileCache.get: outPath: ' + outPath + echo 'fileCache.get: cache: ' + cache + + def result = sh(returnStatus: true, + script: """#!/bin/bash -ex +export PS4='+fileCache.get (script):\${LINENO}: ' + +if [[ ! -d ${cache} ]]; then + echo "fileCache.get: '${cache}' not found." >&2 + exit 1 +fi + +if [[ ! -f ${cacheData} ]]; then + echo "fileCache.get: ERROR: '${cacheData}' not found in ${cache}." >&2 + exit 2 +fi + +if [[ ! -f ${cacheStamp} ]]; then + echo "fileCache.get: ERROR: '${cacheStamp}' not found in ${cache}." >&2 + exit 2 +fi + +if [[ ! -f ${cacheSum} ]]; then + echo "fileCache.get: ERROR: '${cacheSum}' not found in ${cache}." >&2 + exit 2 +fi + +if [[ ! -f ${cacheType} ]]; then + echo "fileCache.get: ERROR: '${cacheType}' not found in ${cache}." >&2 + exit 2 +fi + +echo "fileCache.get: Using '${inPath}' from ${cache}." >&2 + +cat "${cacheStamp}" + +if [[ -e ${outPath} ]]; then + mv -f ${outPath} ${outPath}.old +fi + +data_type="\$(cat ${cacheType})" + +case "\${data_type}" in +file) + tmp="${outPath}" + mkdir -p \${tmp%/*} + cp -af ${cacheData} ${outPath} + ;; +tar) + mkdir -p ${outPath} + sudo tar -C ${outPath} -xf ${cacheData} + ;; +*) + echo "fileCache.commit: ERROR: Bad data type: '\${data_type}" >&2 + exit 2 + ;; +esac + +exit 0 +""") + + echo "fileCache.get result: @${result}@" + + /* Return true if found. */ + return result ? 
false : true +} diff --git a/scripts/build-grub.sh b/scripts/build-grub.sh new file mode 100755 index 00000000..b2e476fe --- /dev/null +++ b/scripts/build-grub.sh @@ -0,0 +1,313 @@ +#!/usr/bin/env bash + +usage () { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "${script_name} - Build grub bootloader." >&2 + echo "Usage: ${script_name} [flags]" >&2 + echo "Option flags:" >&2 + echo " -h --help - Show this help and exit." >&2 + echo " --src-dir - Top of sources. Default: '${src_dir}'." >&2 + echo " --grub-src - Grub source directory. Default: '${grub_src}'." >&2 + echo " --gnulib-src - Gnulib source directory. Default: '${gnulib_src}'." >&2 + echo " --dest-dir - Make DESTDIR. Default: '${dest_dir}'." >&2 + echo " --grub-config - Path to grub config file. Default: '${grub_config}'." >&2 + echo " --mok-key - Path to signing key (PEM format). Default: '${mok_key}'." >&2 + echo " --mok-cert - Path to signing certificate (PEM format). Default: '${mok_cert}'." >&2 + echo "Option steps:" >&2 + echo " -1 --git-clone - Clone git repos." >&2 + echo " -2 --configure - Run configure." >&2 + echo " -3 --build - Build grub." >&2 + echo " -4 --mk-image - Build grub image." >&2 + echo " -5 --sign-image - Sign grub image." >&2 + eval "${old_xtrace}" +} + +process_opts() { + local short_opts="h12345" + local long_opts="help,\ +src-dir:,grub-src:,gnulib-src:,dest-dir:,grub-config:,mok-key:,mok-cert:,\ +git-clone,configure,build,mk-image,sign-image" + + local opts + opts=$(getopt --options ${short_opts} --long ${long_opts} -n "${script_name}" -- "$@") + + eval set -- "${opts}" + + if [[ ${1} == '--' ]]; then + echo "${script_name}: ERROR: Must specify an option step." >&2 + usage + exit 1 + fi + + while true ; do + #echo "${FUNCNAME[0]}: @${1}@ @${2}@" + case "${1}" in + -h | --help) + usage=1 + shift + ;; + --src-dir) + src_dir="${2}" + shift 2 + ;; + --grub-src) + grub_src="${2}" + shift 2 + ;; + --gnulib-src) + gnulib_src="${2}" + shift 2 + ;; + --dest-dir) + dest_dir="${2}" + shift 2 + ;; + --grub-config) + grub_config="${2}" + shift 2 + ;; + --mok-key) + mok_key="${2}" + shift 2 + ;; + --mok-cert) + mok_cert="${2}" + shift 2 + ;; + -1 | --git-clone) + step_git_clone=1 + shift + ;; + -2 | --configure) + step_configure=1 + shift + ;; + -3 | --build) + step_build=1 + shift + ;; + -4 | --mk-image) + step_mk_image=1 + shift + ;; + -5 | --sign-image) + step_sign_image=1 + shift + ;; + --) + shift + break + ;; + *) + echo "${script_name}: ERROR: Internal opts: '${@}'" >&2 + exit 1 + ;; + esac + done +} + +on_exit() { + local result=${1} + + local end_time=${SECONDS} + set +x + echo "${script_name}: Done: ${result}: ${end_time} sec ($(sec_to_min ${end_time}) min)" >&2 +} + +test_for_src() { + if [[ ! -d "${grub_src}/grub-core" ]]; then + echo -e "${script_name}: ERROR: Bad grub_src: '${grub_src}'" >&2 + echo -e "${script_name}: ERROR: Must set grub_src to root of grub sources." >&2 + usage + exit 1 + fi + + if [[ ! -f "${gnulib_src}/gnulib-tool" ]]; then + echo -e "${script_name}: ERROR: Bad gnulib_src: '${gnulib_src}'" >&2 + echo -e "${script_name}: ERROR: Must set gnulib_src to root of gnulib sources." 
>&2 + usage + exit 1 + fi +} + +git_clone() { + git_checkout_safe ${gnulib_src} ${gnulib_repo} ${gnulib_branch} + git_checkout_safe ${grub_src} ${grub_repo} ${grub_branch} +} + +configure() { + local host=${1} + + test_for_src + + pushd "${grub_src}" + ./bootstrap --gnulib-srcdir=${gnulib_src} + ./configure + make -j ${cpus} distclean + popd + + make -j ${cpus} maintainer-clean || : + ${grub_src}/configure --host=${host} --enable-mm-debug \ + --prefix=${install_prefix} +} + +build_grub() { + test_for_src + + make clean + #make -j ${cpus} CFLAGS='-DMM_DEBUG=1' + make -j ${cpus} + make -j ${cpus} DESTDIR=${dest_dir} install +} + +mk_image() { + local image=${1} + local format=${2} + local config=${3} + + if ! test -x "$(command -v ${mkstandalone})"; then + echo "${script_name}: ERROR: Please install '${mkstandalone}'." >&2 + exit 1 + fi + + rm -f ${image} + + echo "configfile ${config}" > grub.cfg + + ${mkstandalone} \ + --directory="./grub-core" \ + --output=${image} \ + --format=${format} \ + --modules="part_gpt part_msdos ls help echo minicmd" \ + --locales="" \ + --verbose \ + /boot/grub/grub.cfg=./grub.cfg + + file ${image} + ls -lh ${image} +} + +sign_image() { + local image=${1} + local key=${2} + local cert=${3} + local out_file=${4} + + if ! test -x "$(command -v ${sbsign})"; then + echo "${script_name}: ERROR: Please install '${sbsign}'." >&2 + exit 1 + fi + + if ! test -x "$(command -v ${sbverify})"; then + echo "${script_name}: ERROR: Please install '${sbverify}'." >&2 + exit 1 + fi + + rm -f ${out_file} + ${sbsign} --key ${key} --cert ${cert} --output ${out_file} ${image} + + file ${out_file} + ls -lh ${out_file} + ${sbverify} --list ${out_file} +} + +#=============================================================================== +# program start +#=============================================================================== +export PS4='\[\e[0;33m\]+ ${BASH_SOURCE##*/}:${LINENO}:(${FUNCNAME[0]:-"?"}):\[\e[0m\] ' +set -ex + +script_name="${0##*/}" +trap "on_exit 'failed.'" EXIT + +SCRIPTS_TOP=${SCRIPTS_TOP:-"$(cd "${BASH_SOURCE%/*}" && pwd)"} +source ${SCRIPTS_TOP}/lib/util.sh + +process_opts "${@}" + +src_dir=${src_dir:-"$(pwd)/src"} +grub_src=${grub_src:-"${src_dir}/grub"} + +if [[ -d "${grub_src}/../gnulib" ]]; then + gnulib_src=${gnulib_src:-"$( cd "${grub_src}/../gnulib" && pwd )"} +else + gnulib_src=${gnulib_src:-"${src_dir}/gnulib"} +fi + +grub_config=${grub_config:-"(hd11,gpt2)/grub/grub.cfg"} + +gnulib_repo=${gnulib_repo:-'git://git.sv.gnu.org/gnulib'} +gnulib_branch=${gnulib_branch:-'master'} + +#grub_repo=${grub_repo:='git://git.savannah.gnu.org/grub.git'} +grub_repo=${grub_repo:='https://github.com/glevand/grub.git'} +grub_branch=${grub_branch:='master'} + +if [[ ${usage} ]]; then + usage + trap - EXIT + exit 0 +fi + +host_arch="$(uname -m)" +target_arch="arm64" + +case ${target_arch} in +arm64) + target_triple="aarch64-linux-gnu" + image_file=${image_file:-"$(pwd)/grubaa64.efi"} + image_format="arm64-efi" + ;; +*) + echo "${script_name}: ERROR: Unsupported target arch '${target_arch}'." 
>&2 + exit 1 + ;; +esac + +if [[ ${host_arch} != ${target_arch} ]]; then + mkstandalone=${mkstandalone:-"grub-mkstandalone"} + dest_dir=${dest_dir:-"$(pwd)/target-out"} +else + mkstandalone=${mkstandalone:-"./grub-mkstandalone"} + dest_dir=${dest_dir:-''} +fi + +sbsign=${sbsign:-"sbsign"} +sbverify=${sbverify:-"sbverify"} + +install_prefix=${install_prefix:-"$(pwd)/install"} + +cpus=$(cpu_count) +SECONDS=0 + +while true; do + if [[ ${step_git_clone} ]]; then + git_clone + unset step_git_clone + elif [[ ${step_configure} ]]; then + configure ${target_triple} + unset step_configure + elif [[ ${step_build} ]]; then + build_grub + unset step_build + elif [[ ${step_mk_image} ]]; then + mk_image ${image_file} ${image_format} ${grub_config} + unset step_mk_image + elif [[ ${step_sign_image} ]]; then + check_file ${image_file} + check_opt 'mok-key' ${mok_key} + check_file ${mok_key} + check_opt 'mok-cert' ${mok_cert} + check_file ${mok_cert} + + sign_image ${image_file} ${mok_key} ${mok_cert} \ + "${image_file%.efi}-signed.efi" + unset step_sign_image + else + break + fi +done + +trap "on_exit 'Success.'" EXIT diff --git a/scripts/build-linux-kernel.sh b/scripts/build-linux-kernel.sh new file mode 100755 index 00000000..f102dc34 --- /dev/null +++ b/scripts/build-linux-kernel.sh @@ -0,0 +1,418 @@ +#!/usr/bin/env bash + +usage() { + local target_list + target_list="$(clean_ws "${targets}")" + local op_list + op_list="$(clean_ws "${ops}")" + + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "${script_name} - Builds linux kernel." >&2 + echo "Usage: ${script_name} [flags] " >&2 + echo "Option flags:" >&2 + echo " -h --help - Show this help and exit." >&2 + echo " -v --verbose - Verbose execution." >&2 + echo " -b --build-dir - Build directory. Default: '${build_dir}'." >&2 + echo " -i --install-dir - Target install directory. Default: '${install_dir}'." >&2 + echo " -l --local-version - Default: '${local_version}'." >&2 + echo " -p --toolchain-prefix - Default: '${toolchain_prefix}'." >&2 + echo "Args:" >&2 + echo " - Build target {${target_list}}." >&2 + echo " Default target: '${target}'." >&2 + echo " - Kernel source directory." >&2 + echo " Default kernel-src: '${kernel_src}'." >&2 + echo " - Build operation {${op_list}}." >&2 + echo " Default op: '${op}'." >&2 + echo "Info:" >&2 + echo " ${cpus} CPUs available." >&2 + + eval "${old_xtrace}" +} + +process_opts() { + local short_opts="hvb:i:l:p:" + local long_opts="help,verbose,\ +build-dir:,install-dir:,local-version:,toolchain-prefix:" + + local opts + opts=$(getopt --options ${short_opts} --long ${long_opts} -n "${script_name}" -- "$@") + + eval set -- "${opts}" + + while true ; do + #echo "${FUNCNAME[0]}: @${1}@ @${2}@" + case "${1}" in + -h | --help) + usage=1 + shift + ;; + -v | --verbose) + verbose=1 + set -x + shift + ;; + -b | --build-dir) + build_dir="${2}" + shift 2 + ;; + -l | --local-version) + local_version="${2}" + shift 2 + ;; + -t | --install-dir) + install_dir="${2}" + shift 2 + ;; + -p | --toolchain-prefix) + toolchain_prefix="${2}" + shift 2 + ;; + --) + target=${2} + kernel_src=${3} + op=${4} + if [[ ${check} ]]; then + break + fi + if ! 
shift 4; then + echo "${script_name}: ERROR: Missing args:" >&2 + echo "${script_name}: ='${target}'" >&2 + echo "${script_name}: ='${kernel_src}'" >&2 + echo "${script_name}: ='${op}'" >&2 + usage + exit 1 + fi + if [[ -n "${1}" ]]; then + echo "${script_name}: ERROR: Got extra args: '${*}'" >&2 + usage + exit 1 + fi + break + ;; + *) + echo "${script_name}: ERROR: Internal opts: '${*}'" >&2 + exit 1 + ;; + esac + done +} + +on_exit() { + local result=${?} + local end_time + end_time="$(date)" + local sec="${SECONDS}" + + if [ -d "${tmp_dir}" ]; then + rm -rf "${tmp_dir}" + fi + + set +x + echo "" >&2 + echo "${script_name}: Done: result=${result}" >&2 + echo "${script_name}: target: ${target}" >&2 + echo "${script_name}: op: ${op}" >&2 + echo "${script_name}: kernel_src: ${kernel_src}" >&2 + echo "${script_name}: build_dir: ${build_dir}" >&2 + echo "${script_name}: install_dir: ${install_dir}" >&2 + echo "${script_name}: local_version: ${local_version}" >&2 + echo "${script_name}: make_options: ${make_options}" >&2 + echo "${script_name}: start_time: ${start_time}" >&2 + echo "${script_name}: end_time: ${end_time}" >&2 + echo "${script_name}: duration: ${sec} sec ($(sec_to_min ${sec} min) min)" >&2 + exit ${result} +} + +run_make_fresh() { + cp "${build_dir}/.config" "${tmp_dir}/config.tmp" + rm -rf "${build_dir:?}"/{*,.*} &>/dev/null || : + eval "${make_cmd} ${make_options} mrproper" + eval "${make_cmd} ${make_options} defconfig" + cp "${tmp_dir}/config.tmp" "${build_dir}/.config" + eval "${make_cmd} ${make_options} olddefconfig" +} + +run_make_targets() { + eval "${make_cmd} ${make_options} savedefconfig" + eval "${make_cmd} ${make_options} ${target_make_targets}" +} + +run_install_image() { + mkdir -p "${install_dir}/boot" + cp "${build_dir}"/{defconfig,System.map,vmlinux} "${install_dir}/boot/" + cp "${build_dir}/.config" "${install_dir}/boot/config" + "${toolchain_prefix}strip" -s -R .comment "${build_dir}/vmlinux" -o "${install_dir}/boot/vmlinux.strip" + + if [[ -z "${target_copy}" ]]; then + eval "${make_cmd} ${make_options} install" + else + for ((i = 0; i <= ${#target_copy[@]} - 1; i += 2)); do + cp --no-dereference "${build_dir}/${target_copy[i]}" "${install_dir}/${target_copy[i+1]}" + done + fi + + if [[ -n "${target_copy_extra}" ]]; then + for ((i = 0; i <= ${#target_copy_extra[@]} - 1; i += 2)); do + if [[ -f "${target_copy_extra[i]}" ]]; then + cp --no-dereference "${build_dir}/${target_copy_extra[i]}" "${install_dir}/${target_copy_extra[i+1]}" + fi + done + fi +} + +run_install_modules() { + mkdir -p "${install_dir}/lib/modules" + eval "${make_cmd} ${make_options} modules_install" +} + +default_toolchain_prefix() { + local target="${1}" + + case "${target}" in + amd64) + echo "x86_64-linux-gnu-" + ;; + arm64|arm64_be) + echo "aarch64-linux-gnu-" + ;; + ppc32|ppc64) + echo "powerpc-linux-gnu-" + ;; + ppc64le) + echo "powerpc64le-linux-gnu-" + ;; + ps3) + echo "powerpc-linux-gnu-" + ;; + *) + echo "" + ;; + esac +} + +set_target_variables() { + local target="${1}" + +# target_make_options: +# target_defconfig: +# target_copy: (src dest) +# target_copy_extra: (src dest) +# target_make_targets + + case "${target}" in + amd64) + target_make_options="ARCH=x86_64 CROSS_COMPILE='${ccache}${toolchain_prefix}'" + target_defconfig="${target_defconfig:-${target}_defconfig}" + target_copy=( + vmlinux boot/ + ) + ;; + arm64|arm64_be) + target_make_options="ARCH=arm64 CROSS_COMPILE='${ccache}${toolchain_prefix}'" + target_defconfig="${target_defconfig:-defconfig}" + target_copy=( + 
vmlinux boot/ + arch/arm64/boot/Image boot/ + ) + ;; + native) + target_make_options="CROSS_COMPILE='${ccache}'" + target_defconfig="${target_defconfig:-defconfig}" + target_make_targets="all" + ;; + ppc32|ppc64) + target_make_options="ARCH=powerpc CROSS_COMPILE='${ccache}${toolchain_prefix}'" + target_defconfig="${target_defconfig:-${target}_defconfig}" + target_copy=( + vmlinux boot/ + ) + ;; + ppc64le) + target_make_options="ARCH=powerpc CROSS_COMPILE='${ccache}${toolchain_prefix}'" + target_defconfig="${target_defconfig:-defconfig}" + target_copy=( + vmlinux boot/ + ) + ;; + ps3) + target_make_options="ARCH=powerpc CROSS_COMPILE='""${ccache}${toolchain_prefix}""'" + target_defconfig="${target_defconfig:-${target}_defconfig}" + target_copy=( + vmlinux boot/ + arch/powerpc/boot/dtbImage.ps3.bin boot/linux + ) + target_copy_extra=( + arch/powerpc/boot/otheros.bld boot/ + ) + ;; + *) + echo "${script_name}: ERROR: Unknown target: '${target}'" >&2 + usage + exit 1 + ;; + esac +} +#=============================================================================== +# program start +#=============================================================================== +export PS4='\[\e[0;33m\]+ ${BASH_SOURCE##*/}:${LINENO}:(${FUNCNAME[0]:-"?"}):\[\e[0m\] ' +set -e + +script_name="${0##*/}" + +SCRIPTS_TOP=${SCRIPTS_TOP:-"$(cd "${BASH_SOURCE%/*}" && pwd)"} +source "${SCRIPTS_TOP}/lib/util.sh" + +trap "on_exit 'failed.'" EXIT + +targets=" + amd64 + arm64 + arm64_be + native + ppc32 + ppc64 + ppc64le + ps3 + x86_64 +" +ops=" + all: fresh targets install_image install_modules + build: targets + defconfig + fresh + headers: mrproper defconfig prepare + image_install + install: install_image install_modules + modules_install + rebuild: clean targets + savedefconfig + targets + gconfig + menuconfig + oldconfig + olddefconfig + xconfig +" + +cpus="$(cpu_count)" + +make_cmd="${make_cmd:-env PS4='+ \${0##*/}: ' make}" + +process_opts "${@}" + +if [[ ${build_dir} ]]; then + build_dir="$(realpath "${build_dir}")" +else + build_dir="$(pwd)/${target}-kernel-build" +fi + +if [[ ${install_dir} ]]; then + mkdir -p "${install_dir}" + install_dir="$(realpath "${install_dir}")" +else + install_dir="${build_dir%-*}-install" + mkdir -p "${install_dir}" +fi + +if [[ ! 
${local_version} ]]; then + local_version="${kernel_src##*/}" +fi + +toolchain_prefix="${toolchain_prefix:-$(default_toolchain_prefix "${target}")}" + +if [[ ${usage} ]]; then + usage + trap - EXIT + exit 0 +fi + +check_directory "${kernel_src}" "" "usage" + +if test -x "$(command -v ccache)"; then + ccache='ccache ' +else + echo "${script_name}: INFO: Please install ccache" >&2 +fi + +declare -a target_copy +declare -a target_copy_extra + +set_target_variables "${target}" + +declare -A target_args + +if [[ ${verbose} ]]; then + make_options_extra="V=1" +fi + +make_options_user="${make_options_user:-}" + +make_options="-j${cpus} ${target_make_options} INSTALL_MOD_PATH='${install_dir}' INSTALL_PATH='${install_dir}/boot' INSTALLKERNEL=non-existent-file O='${build_dir}' ${make_options_extra} ${make_options_user}" + +start_time="$(date)" +SECONDS=0 + +export CCACHE_DIR=${CCACHE_DIR:-"${build_dir}.ccache"} + +mkdir -p "${build_dir}" +mkdir -p "${CCACHE_DIR}" + +cd "${kernel_src}" + +tmp_dir="$(mktemp --tmpdir --directory "${script_name}.XXXX")" + +case "${op}" in +all) + run_make_fresh + run_make_targets + run_install_image + run_install_modules + ;; +defconfig) + if [[ -n ${target_defconfig} ]]; then + eval "make ${make_options} ${target_defconfig}" + else + eval "make ${make_options} defconfig" + fi + eval "make ${make_options} savedefconfig" + ;; +fresh) + run_make_fresh + ;; +headers) + eval "${make_cmd} ${make_options} mrproper" + eval "${make_cmd} ${make_options} defconfig" + eval "${make_cmd} ${make_options} prepare" + ;; +image_install) + run_install_image + ;; +install) + run_install_image + run_install_modules + ;; +modules_install) + run_install_modules + ;; +rebuild) + eval "${make_cmd} ${make_options} clean" + run_make_targets + ;; +savedefconfig) + eval "${make_cmd} ${make_options} savedefconfig" + ;; +build|targets) + run_make_targets + ;; +gconfig | menuconfig | oldconfig | olddefconfig | xconfig) + eval "${make_cmd} ${make_options} ${op}" + eval "${make_cmd} ${make_options} savedefconfig" + ;; +*) + echo "${script_name}: INFO: Unknown op: '${op}'" >&2 + eval "${make_cmd} ${make_options} ${op}" + ;; +esac diff --git a/scripts/build-rootfs.sh b/scripts/build-rootfs.sh new file mode 100755 index 00000000..45b7b1c9 --- /dev/null +++ b/scripts/build-rootfs.sh @@ -0,0 +1,504 @@ +#!/usr/bin/env bash + +usage() { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "${script_name} - Builds a minimal Linux disk image." >&2 + echo "Usage: ${script_name} [flags]" >&2 + echo "Option flags:" >&2 + echo " -a --arch - Target architecture. Default: '${target_arch}'." >&2 + echo " -c --clean-rootfs - Delete bootstrap and rootfs directories. Default: ${clean_rootfs}" >&2 + echo " -h --help - Show this help and exit." >&2 + echo " -i --output-disk-image - Output a binary disk image file '${disk_img}'." >&2 + echo " -t --rootfs-type - Rootfs type {$(clean_ws ${known_rootfs_types})}." >&2 + echo " Default: '${rootfs_type}'." >&2 + echo " -v --verbose - Verbose execution." >&2 + echo " --bootstrap-dir - Bootstrap directory. Default: '${bootstrap_dir}'." >&2 + echo " --image-dir - Image output path. Default: '${image_dir}', ${rootfs_dir}', '${initrd}', '${disk_img}'." >&2 + echo "Option steps:" >&2 + echo " -1 --bootstrap - Run bootstrap rootfs step. Default: '${step_bootstrap}'." >&2 + echo " -2 --rootfs-setup - Run rootfs setup step. Default: '${step_rootfs_setup}'." >&2 + echo " --kernel-modules - Kernel modules to install. Default: '${kernel_modules}'." 
>&2 + echo " --extra-packages - Extra distro packages. Default: '${extra_packages}'." >&2 + echo " -3 --make-image - Run make image step. Default: '${step_make_image}'." >&2 + eval "${old_xtrace}" +} + +process_opts() { + local short_opts="a:chit:v123" + local long_opts="arch:,clean-rootfs,help,output-disk-image,rootfs-type:,verbose,\ +bootstrap-dir:,image-dir:,\ +bootstrap,rootfs-setup,kernel-modules:,extra-packages:,make-image" + + local opts + opts=$(getopt --options "${short_opts}" --long "${long_opts}" -n "${script_name}" -- "${@}") + + eval set -- "${opts}" + + while true ; do + #echo "${FUNCNAME[0]}: @${1}@${2}@" + case "${1}" in + -a | --arch) + target_arch=$(get_arch "${2}") + shift 2 + ;; + -c | --clean-rootfs) + clean_rootfs=1 + shift + ;; + -h | --help) + usage=1 + shift + ;; + -i | --output-disk-image) + output_disk_image=1 + shift + ;; + -m | --kernel-modules) + kernel_modules="${2}" + shift 2 + ;; + -p | --extra-packages) + extra_packages="${2}" + shift 2 + ;; + -t | --rootfs-type) + rootfs_type="${2}" + shift 2 + ;; + -v | --verbose) + set -x + verbose=1 + shift + ;; + --bootstrap-dir) + bootstrap_dir="${2}" + shift 2 + ;; + --image-dir) + image_dir="${2}" + shift 2 + ;; + -1 | --bootstrap) + step_bootstrap=1 + shift + ;; + -2 | --rootfs-setup) + step_rootfs_setup=1 + shift + ;; + -3 | --make-image) + step_make_image=1 + shift + ;; + --) + shift + break + ;; + *) + echo "${script_name}: ERROR: Internal opts: '${@}'" >&2 + exit 1 + ;; + esac + done +} + +on_exit() { + if [ -d ${tmp_dir} ]; then + ${sudo} rm -rf ${tmp_dir} + fi +} + +on_fail() { + local chroot=${1} + local mnt=${2} + + echo "${script_name}: Step ${current_step}: FAILED." >&2 + + cleanup_chroot ${chroot} + + ${sudo} chown -R $(id --user --real --name): ${chroot} + + if [ -d "${mnt}" ]; then + clean_make_disk_img "${mnt}" + rm -rf "${mnt}" + fi + + if [ -d ${tmp_dir} ]; then + ${sudo} rm -rf ${tmp_dir} + fi + + if [ ${need_clean_rootfs} ]; then + ${sudo} rm -rf ${chroot} + fi + + on_exit +} + +check_kernel_modules() { + local dir=${1} + + if [ ${dir} ]; then + if [ ! -d "${dir}" ]; then + echo "${script_name}: ERROR: directory not found: '${dir}'" >&2 + usage + exit 1 + fi + if [ "$(basename $(cd ${dir}/.. && pwd))" != "modules" ]; then + echo "${script_name}: ERROR: No kernel modules found in '${dir}'" >&2 + usage + exit 1 + fi + fi +} + +test_step_code() { + local step_code="${step_bootstrap}-${step_rootfs_setup}-${step_make_image}" + + case "${step_code}" in + 1--|1-1-|1-1-1|-1-|-1-1|--1) + #echo "${script_name}: Steps OK" >&2 + ;; + --) + step_bootstrap=1 + step_rootfs_setup=1 + step_make_image=1 + ;; + 1--1) + echo "${script_name}: ERROR: Bad flags: 'bootstrap + make_image'." >&2 + usage + exit 1 + ;; + *) + echo "${script_name}: ERROR: Internal bad step_code: '${step_code}'." >&2 + exit 1 + ;; + esac +} + +setup_network_ifupdown() { + local rootfs=${1} + + echo "${TARGET_HOSTNAME}" | sudo_write "${rootfs}/etc/hostname" + + sudo_append "${rootfs}/etc/network/interfaces" <&2 + return + fi + + local dest="${rootfs}/lib/modules/${src##*/}" + + if [ ${verbose} ]; then + local extra='-v' + fi + + ${sudo} mkdir -p ${dest} + ${sudo} rsync -av --delete ${extra} \ + --exclude '/build' --exclude '/source' \ + ${src}/ ${dest}/ + echo "${script_name}: INFO: Kernel modules size: $(directory_size_human ${dest})" +} + +setup_password() { + local rootfs=${1} + local pw=${2} + + pw=${pw:-"r"} + echo "${script_name}: INFO: Login password = '${pw}'." 
>&2 + + local i + local hash + for ((i = 0; ; i++)); do + hash="$(openssl passwd -1 -salt tdd${i} ${pw})" + if [ "${hash/\/}" == "${hash}" ]; then + break + fi + done + + ${sudo} sed --in-place "s/root:x:0:0/root:${hash}:0:0/" \ + ${rootfs}/etc/passwd + ${sudo} sed --in-place '/^root:.*/d' \ + ${rootfs}/etc/shadow +} + +delete_rootfs() { + local rootfs=${1} + + ${sudo} rm -rf ${rootfs} +} + +clean_make_disk_img() { + local mnt=${1} + + ${sudo} umount ${mnt} || : +} + +make_disk_img() { + local rootfs=${1} + local img=${2} + local mnt=${3} + + tmp_img="${tmp_dir}/tdd-disk.img" + + dd if=/dev/zero of=${tmp_img} bs=1M count=1536 + mkfs.ext4 ${tmp_img} + + mkdir -p ${mnt} + + ${sudo} mount ${tmp_img} ${mnt} + ${sudo} cp -a ${rootfs}/* ${mnt} + + ${sudo} umount ${mnt} || : + cp ${tmp_img} ${img} + rm -f ${tmp_img} +} + +make_ramfs() { + local fs=${1} + local out_file=${2} + + (cd ${fs} && ${sudo} find . | ${sudo} cpio --create --format='newc' --owner=root:root | gzip) > ${out_file} +} + +make_manifest() { + local rootfs=${1} + local out_file=${2} + + (cd ${rootfs} && ${sudo} find . -ls | sort --key=11) > ${out_file} +} + +print_usage_summary() { + local rootfs_dir=${1} + local kernel_modules=${2} + + rootfs_size="$(directory_size_bytes ${rootfs_dir})" + rootfs_size="$(bc <<< "${rootfs_size} / 1048576")" + + modules_size="$(directory_size_bytes ${kernel_modules})" + modules_size="$(bc <<< "${modules_size} / 1048576")" + + base_size="$(bc <<< "${rootfs_size} - ${modules_size}")" + + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "${script_name}: INFO: Base size: ${base_size} MiB" + echo "${script_name}: INFO: Modules size: ${modules_size} MiB" + echo "${script_name}: INFO: Total size: ${rootfs_size} MiB" + eval "${old_xtrace}" +} + +write_tdd_client_script() { + local out_file=${1} + local timeout=${2:-241} + + sudo cp -vf ${RELAY_TOP}/tdd-relay-client.sh "${out_file}" + sudo sed --in-place "{s/@@timeout@@/${timeout}/}" "${out_file}" + + ${sudo} chmod u+x "${out_file}" +} + +#=============================================================================== +# program start +#=============================================================================== +script_name="${0##*/}" + +SCRIPTS_TOP=${SCRIPTS_TOP:-"$( cd "${BASH_SOURCE%/*}" && pwd )"} +RELAY_TOP=${RELAY_TOP:-"$( cd "${SCRIPTS_TOP}/../relay" && pwd )"} + +source ${SCRIPTS_TOP}/lib/util.sh +source ${SCRIPTS_TOP}/lib/chroot.sh + +trap "on_exit" EXIT +set -e + +sudo="sudo -S" + +process_opts "${@}" + +TARGET_HOSTNAME=${TARGET_HOSTNAME:-"tdd-tester"} + +rootfs_type=${rootfs_type:-"debian"} +source "${SCRIPTS_TOP}/rootfs-plugin/${rootfs_type}.sh" + +host_arch=$(get_arch "$(uname -m)") +target_arch=${target_arch:-"${host_arch}"} + +image_dir=${image_dir:-"$(pwd)/${target_arch}-${rootfs_type}.image"} +bootstrap_dir=${bootstrap_dir:-"${image_dir%.image}.bootstrap"} + +image_rootfs="${image_dir}/rootfs" +disk_img="${image_dir}/disk.img" +initrd="${image_dir}/initrd" +manifest="${image_dir}/manifest" +server_key="${image_dir}/server-key" +login_key="${image_dir}/login-key" + +test_step_code + +if [ ${usage} ]; then + usage + exit 0 +fi + +${sudo} true + +cleanup_chroot ${image_rootfs} +cleanup_chroot ${bootstrap_dir} + +tmp_dir="$(mktemp --tmpdir --directory ${script_name}.XXXX)" + +if [ ${step_bootstrap} ]; then + current_step="bootstrap" + echo "${script_name}: INFO: Step ${current_step} (${rootfs_type}): start." 
>&2 + + sudo rm -rf ${bootstrap_dir} + mkdir -p ${bootstrap_dir} + + trap "on_fail ${bootstrap_dir} none" EXIT + bootstrap_rootfs ${bootstrap_dir} + ${sudo} chown -R $(id --user --real --name): ${bootstrap_dir} + + echo "${script_name}: INFO: Step ${current_step} (${rootfs_type}): Done (${bootstrap_dir})." >&2 + echo "${script_name}: INFO: Bootstrap size: $(directory_size_human ${bootstrap_dir})" +fi + +if [ ${step_rootfs_setup} ]; then + current_step="rootfs_setup" + echo "${script_name}: INFO: Step ${current_step} (${rootfs_type}): start." >&2 + echo "${script_name}: INFO: Step ${current_step}: Using ${bootstrap_dir}." >&2 + + check_directory "${bootstrap_dir}" + check_directory "${bootstrap_dir}/usr/bin" + + check_directory ${kernel_modules} + check_kernel_modules ${kernel_modules} + + trap "on_fail ${image_rootfs} none" EXIT + + mkdir -p ${image_rootfs} + ${sudo} rsync -a --delete ${bootstrap_dir}/ ${image_rootfs}/ + + setup_packages ${image_rootfs} $(get_default_packages) ${extra_packages} + + setup_initrd_boot ${image_rootfs} + setup_login ${image_rootfs} + setup_network ${image_rootfs} + setup_sshd ${image_rootfs} ${server_key} + setup_ssh_keys ${image_rootfs} ${login_key} + setup_kernel_modules ${image_rootfs} ${kernel_modules} + setup_relay_client ${image_rootfs} + + rootfs_cleanup ${image_rootfs} + + ${sudo} chown -R $(id --user --real --name): ${image_rootfs} + + print_usage_summary ${image_rootfs} ${kernel_modules} + echo "${script_name}: INFO: Step ${current_step} (${rootfs_type}): done." >&2 +fi + +if [ ${step_make_image} ]; then + current_step="make_image" + echo "${script_name}: INFO: Step ${current_step} (${rootfs_type}): start." >&2 + + check_directory ${image_rootfs} + + if [ ${output_disk_image} ]; then + tmp_mnt="${tmp_dir}/tdd-disk-mnt" + trap "on_fail ${image_rootfs} ${tmp_mnt}" EXIT + make_disk_img ${image_rootfs} ${disk_img} ${tmp_mnt} + trap "on_fail ${image_rootfs} none" EXIT + clean_make_disk_img "${tmp_mnt}" + fi + + make_ramfs ${image_rootfs} ${initrd} + make_manifest ${image_rootfs} ${manifest} + + if [ -d ${tmp_mnt} ]; then + rm -rf ${tmp_mnt} + fi + + need_clean_rootfs=${clean_rootfs} + + print_usage_summary ${image_rootfs} ${kernel_modules} + echo "${script_name}: INFO: Step ${current_step} (${rootfs_type}): done." >&2 + +fi + +if [ ${need_clean_rootfs} ]; then + ${sudo} rm -rf ${image_rootfs} +fi + +trap on_exit EXIT + +echo "${script_name}: INFO: Success: bootstrap='${bootstrap_dir}' image='${image_dir}'" >&2 diff --git a/scripts/build-tumbleweed-kernel.sh b/scripts/build-tumbleweed-kernel.sh new file mode 100755 index 00000000..b55bc6d6 --- /dev/null +++ b/scripts/build-tumbleweed-kernel.sh @@ -0,0 +1,256 @@ +#!/usr/bin/env bash + +usage() { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "${script_name} - Builds OpenSUSE tumbleweed kernel." >&2 + echo "Usage: ${script_name} [flags]" >&2 + echo "Option flags:" >&2 + echo " -h --help - Show this help and exit." >&2 + echo " -v --verbose - Verbose execution." >&2 + echo " -c --config-file - Default: '${config_file}'." >&2 + echo " -p --toolchain-prefix - Default: '${toolchain_prefix}'." >&2 + echo "Option steps:" >&2 + echo " -1 --get - Get rpms." >&2 + echo " -2 --prepare - Prepare sources." >&2 + echo " -3 --build - Build kernel." 
>&2 + eval "${old_xtrace}" +} + +process_opts() { + local short_opts="hvc:p:123" + local long_opts="help,verbose,config-file:,toolchain-prefix:,get,prepare,build" + + local opts + opts=$(getopt --options ${short_opts} --long ${long_opts} -n "${script_name}" -- "$@") + + if [ $? != 0 ]; then + echo "${script_name}: ERROR: Internal getopt" >&2 + exit 1 + fi + + eval set -- "${opts}" + + while true ; do + case "${1}" in + -h | --help) + usage=1 + shift + ;; + -v | --verbose) + shift + set -x + verbose=1 + ;; + -c | --config-file) + config_file="${2}" + shift 2 + ;; + -p | --toolchain-prefix) + toolchain_prefix="${2}" + shift 2 + ;; + -1 | --get) + step_get=1 + shift + ;; + -2 | --prepare) + step_prepare=1 + shift + ;; + -3 | --build) + step_build=1 + shift + ;; + --) + shift + if [[ ${@} ]]; then + set +o xtrace + echo "${script_name}: ERROR: Got extra args: '${@}'" >&2 + usage + exit 1 + fi + break + ;; + *) + echo "${script_name}: ERROR: Internal opts" >&2 + exit 1 + ;; + esac + done +} + +on_exit() { + local result=${1} + + if [ -d ${tmp_dir} ]; then + rm -rf ${tmp_dir} + fi + + local end_time=${SECONDS} + set +x + echo "${script_name}: Done: ${result}: ${end_time} sec ($(sec_to_min ${end_time}) min)" >&2 +} + +get_rpms() { + #local base_url="http://download.opensuse.org/ports/aarch64/tumbleweed/repo/oss" + local base_url="http://download.opensuse.org/repositories/devel:/ARM:/Factory:/Contrib:/ILP32/standard" + + local k_bin_url="${base_url}/aarch64/${k_bin}.rpm" + local k_bin_dev_url="${base_url}/aarch64/${k_bin_dev}.rpm" + local k_src_url="${base_url}/noarch/${k_src}.rpm" + local k_dev_url="${base_url}/noarch/${k_dev}.rpm" + local k_mac_url="${base_url}/noarch/${k_mac}.rpm" + + echo "out_dir = '${out_dir}'" + + mkdir -p "${out_dir}" + + pushd "${out_dir}" + wget "${k_bin_url}" + wget "${k_src_url}" + wget "${k_dev_url}" + wget "${k_bin_dev_url}" + #wget "${k_mac_url}" + popd + + mkdir -p "${out_dir}/${k_bin}" + (cd "${out_dir}/${k_bin}" && ${SCRIPTS_TOP}/rpm2files.sh < "${out_dir}/${k_bin}.rpm") + + mkdir -p "${out_dir}/${k_src}" + (cd "${out_dir}/${k_src}" && ${SCRIPTS_TOP}/rpm2files.sh < "${out_dir}/${k_src}.rpm") + + mkdir -p "${out_dir}/${k_dev}" + (cd "${out_dir}/${k_dev}" && ${SCRIPTS_TOP}/rpm2files.sh < "${out_dir}/${k_dev}.rpm") + + #mkdir -p "${out_dir}/${k_bin_dev}" + #(cd "${out_dir}/${k_bin_dev}" && ${SCRIPTS_TOP}/rpm2files.sh < "${out_dir}/${k_bin_dev}.rpm") + + #mkdir -p "${out_dir}/${k_mac}" + #(cd "${out_dir}/${k_mac}" && ${SCRIPTS_TOP}/rpm2files.sh < "${out_dir}/${k_mac}.rpm") + + { + echo -e "${k_src_url}\n${k_dev_url}\n${k_mac_url}\n${k_bin_url}\n${k_def_url}\n" + ls -l "${out_dir}" + } > "${out_dir}/kernel-${k_ver}.manifest" + cat "${out_dir}/kernel-${k_ver}.manifest" +} + +prepare_sources() { + if [[ ! -d "${out_dir}/${k_bin}" ]]; then + echo "${script_name}: ERROR: Binary RPM directory not found: '${out_dir}/${k_bin}'" >&2 + exit 1 + fi + + if [[ ! -d "${out_dir}/${k_src}" ]]; then + echo "${script_name}: ERROR: Source RPM directory not found: '${out_dir}/${k_src}'" >&2 + exit 1 + fi + + if [[ ! -d "${out_dir}/${k_dev}" ]]; then + echo "${script_name}: ERROR: Devel RPM directory not found: '${out_dir}/${k_dev}'" >&2 + exit 1 + fi + + rm -rf "${src_dir}" + mkdir -p "${src_dir}" + + cp -a --link "${out_dir}/${k_src}/usr/src/"linux*/* "${out_dir}/${k_dev}/usr/src/"linux*/* "${src_dir}/" + cp -v "${out_dir}/${k_bin}/boot"/config-*-64kb "${suse_config}" +} + +build_kernel() { + if [[ ! 
-d "${src_dir}" ]]; then + echo "${script_name}: ERROR: Source directory not found: '${src_dir}'" >&2 + exit 1 + fi + + rm -rf "${build_dir}" "${install_dir}" + mkdir -p "${build_dir}" "${install_dir}" + + local log_file="${out_dir}/build.log" + + ${SCRIPTS_TOP}/build-linux-kernel.sh \ + ${verbose_build:+--verbose} \ + --build-dir="${build_dir}" \ + --install-dir="${install_dir}" \ + arm64 "${src_dir}" defconfig 2>&1 | tee --append "${log_file}" + + + cp -vf ${config_file} ${build_dir}/.config + ${SCRIPTS_TOP}/build-linux-kernel.sh \ + ${verbose_build:+--verbose} \ + --build-dir="${build_dir}" \ + --install-dir="${install_dir}" \ + arm64 "${src_dir}" olddefconfig 2>&1 | tee --append "${log_file}" + + ${SCRIPTS_TOP}/build-linux-kernel.sh \ + ${verbose_build:+--verbose} \ + --build-dir="${build_dir}" \ + --install-dir="${install_dir}" \ + arm64 "${src_dir}" all 2>&1 | tee --append "${log_file}" +} + +#=============================================================================== +# program start +#=============================================================================== +export PS4='\[\e[0;33m\]+ ${BASH_SOURCE##*/}:${LINENO}:(${FUNCNAME[0]:-"?"}):\[\e[0m\] ' + +script_name="${0##*/}" +SCRIPTS_TOP=${SCRIPTS_TOP:-"$(cd "${BASH_SOURCE%/*}" && pwd)"} +source ${SCRIPTS_TOP}/lib/util.sh + +trap "on_exit 'failed.'" EXIT +set -o pipefail +set -e + +process_opts "${@}" + +#k_ver=${k_ver:-"5.3.8-217.1"} +#k_ver=${k_ver:-"5.3.12-2.1"} +k_ver=${k_ver:-"5.4.7-227.1"} + +k_bin="kernel-64kb-${k_ver}.aarch64" +k_bin_dev="kernel-64kb-devel-${k_ver}.aarch64" + +k_src="kernel-source-${k_ver}.noarch" +k_dev="kernel-devel-${k_ver}.noarch" +k_mac="kernel-macros-${k_ver}.noarch" + +out_dir=${out_dir:-"$(cd . && pwd)/kernel-${k_ver}"} + +src_dir="${out_dir}/src" +build_dir="${out_dir}/build" +install_dir="${out_dir}/install" + +suse_config="${src_dir}/suse-config" +config_file="${config_file:-${suse_config}}" + +#verbose_build=1 + +if [[ ${usage} ]]; then + usage + trap - EXIT + exit 0 +fi + +SECONDS=0 + +if [[ ${step_get} ]]; then + trap "on_exit '[get] failed.'" EXIT + get_rpms +fi + +if [[ ${step_prepare} ]]; then + trap "on_exit '[prepare] failed.'" EXIT + prepare_sources +fi + +if [[ ${step_build} ]]; then + trap "on_exit '[build] failed.'" EXIT + build_kernel +fi + +trap "on_exit 'Success.'" EXIT +exit 0 diff --git a/scripts/checkin.sh b/scripts/checkin.sh new file mode 100755 index 00000000..e68dd85a --- /dev/null +++ b/scripts/checkin.sh @@ -0,0 +1,93 @@ +#!/usr/bin/env bash + +set -e + +script_name="${0##*/}" + +SCRIPTS_TOP=${SCRIPTS_TOP:-"$( cd "${BASH_SOURCE%/*}" && pwd )"} + +source ${SCRIPTS_TOP}/lib/util.sh +source ${SCRIPTS_TOP}/lib/checkout.sh + +usage() { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "${script_name} - Checkin TDD resource." >&2 + echo "Usage: ${script_name} [flags] " >&2 + echo "Option flags:" >&2 + echo " -h --help - Show this help and exit." >&2 + echo " -v --verbose - Verbose execution." >&2 + echo "Args:" >&2 + echo " - Checkout reservation token" >&2 + eval "${old_xtrace}" +} + +short_opts="hv" +long_opts="help,verbose" + +opts=$(getopt --options ${short_opts} --long ${long_opts} -n "${script_name}" -- "$@") + +if [ $? != 0 ]; then + echo "${script_name}: ERROR: Internal getopt" >&2 + exit 1 +fi + +eval set -- "${opts}" + +while true ; do + case "${1}" in + -h | --help) + usage=1 + shift + ;; + -v | --verbose) + set -x + verbose=1 + shift + ;; + --) + shift + token=${1} + if ! 
shift 1; then + set +o xtrace + echo "${script_name}: ERROR: Missing args:" >&2 + echo "${script_name}: ='${token}'" >&2 + usage + exit 1 + fi + if [[ -n "${1}" ]]; then + set +o xtrace + echo "${script_name}: ERROR: Got extra args: '${@}'" >&2 + usage + exit 1 + fi + break + ;; + *) + set +o xtrace + echo "${script_name}: ERROR: Internal opts: '${@}'" >&2 + exit 1 + ;; + esac +done + +if [[ -n "${usage}" ]]; then + usage + exit 0 +fi + +on_err() { + set +x + set +e + + echo "${script_name}: token: ${token}" + echo "${script_name}: Done, failed." >&2 +} + +trap on_err EXIT + +checkin "${token}" + +trap - EXIT +echo "${script_name}: Done, success." >&2 diff --git a/scripts/checkout-query.sh b/scripts/checkout-query.sh new file mode 100755 index 00000000..f488f342 --- /dev/null +++ b/scripts/checkout-query.sh @@ -0,0 +1,99 @@ +#!/usr/bin/env bash + +usage() { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "${script_name} - Query TDD resource." >&2 + echo "Usage: ${script_name} [flags] " >&2 + echo "Option flags:" >&2 + echo " -h --help - Show this help and exit." >&2 + echo " -v --verbose - Verbose execution." >&2 + echo "Args:" >&2 + echo " - Resource to query. Default: '${resource}'." >&2 + eval "${old_xtrace}" +} + +process_opts() { + local short_opts="hv" + local long_opts="help,verbose" + + local opts + opts=$(getopt --options ${short_opts} --long ${long_opts} -n "${script_name}" -- "$@") + + eval set -- "${opts}" + + while true ; do + case "${1}" in + -h | --help) + usage=1 + shift + ;; + -v | --verbose) + set -x + verbose=1 + shift + ;; + --) + shift + resource=${1} + if ! shift 1; then + set +o xtrace + echo "${script_name}: ERROR: Missing args:" >&2 + echo "${script_name}: ='${resource}'" >&2 + usage + exit 1 + fi + if [[ -n "${1}" ]]; then + set +o xtrace + echo "${script_name}: ERROR: Got extra args: '${@}'" >&2 + usage + exit 1 + fi + break + ;; + *) + set +o xtrace + echo "${script_name}: ERROR: Internal opts: '${@}'" >&2 + exit 1 + ;; + esac + done +} + +on_err() { + set +x + set +e + + echo "${script_name}: resource: ${resource}" + echo "${script_name}: Done, failed." >&2 +} + +#=============================================================================== +# program start +#=============================================================================== +export PS4='\[\e[0;33m\]+ ${BASH_SOURCE##*/}:${LINENO}:(${FUNCNAME[0]:-"?"}):\[\e[0m\] ' +script_name="${0##*/}" + +trap on_err EXIT +set -e + +SCRIPTS_TOP=${SCRIPTS_TOP:-"$(cd "${BASH_SOURCE%/*}" && pwd)"} + +source ${SCRIPTS_TOP}/lib/util.sh +source ${SCRIPTS_TOP}/lib/checkout.sh + +process_opts "${@}" + +if [[ ${usage} ]]; then + usage + trap - EXIT + exit 0 +fi + + +seconds="XXX" +checkout_query "${resource}" seconds + +trap - EXIT +echo "${token}" diff --git a/scripts/checkout.sh b/scripts/checkout.sh new file mode 100755 index 00000000..42f0509b --- /dev/null +++ b/scripts/checkout.sh @@ -0,0 +1,98 @@ +#!/usr/bin/env bash + +set -e + +script_name="${0##*/}" + +SCRIPTS_TOP=${SCRIPTS_TOP:-"$( cd "${BASH_SOURCE%/*}" && pwd )"} + +source ${SCRIPTS_TOP}/lib/util.sh +source ${SCRIPTS_TOP}/lib/checkout.sh + +usage() { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "${script_name} - Checkout TDD resource." >&2 + echo "Usage: ${script_name} [flags] " >&2 + echo "Option flags:" >&2 + echo " -h --help - Show this help and exit." >&2 + echo " -v --verbose - Verbose execution." >&2 + echo "Args:" >&2 + echo " - Resource to reserve. Default: '${resource}'." 
>&2 + echo " - Reservation time." >&2 + eval "${old_xtrace}" +} + +short_opts="hv" +long_opts="help,verbose" + +opts=$(getopt --options ${short_opts} --long ${long_opts} -n "${script_name}" -- "$@") + +if [ $? != 0 ]; then + echo "${script_name}: ERROR: Internal getopt" >&2 + exit 1 +fi + +eval set -- "${opts}" + +while true ; do + case "${1}" in + -h | --help) + usage=1 + shift + ;; + -v | --verbose) + set -x + verbose=1 + shift + ;; + --) + shift + resource=${1} + seconds=${2} + if ! shift 2; then + set +o xtrace + echo "${script_name}: ERROR: Missing args:" >&2 + echo "${script_name}: ='${resource}'" >&2 + echo "${script_name}: ='${seconds}'" >&2 + usage + exit 1 + fi + if [[ -n "${1}" ]]; then + set +o xtrace + echo "${script_name}: ERROR: Got extra args: '${@}'" >&2 + usage + exit 1 + fi + break + ;; + *) + set +o xtrace + echo "${script_name}: ERROR: Internal opts: '${@}'" >&2 + exit 1 + ;; + esac +done + +if [[ -n "${usage}" ]]; then + usage + exit 0 +fi + +on_err() { + set +x + set +e + + echo "${script_name}: resource: ${resource}" + echo "${script_name}: seconds: ${seconds}" + echo "${script_name}: Done, failed." >&2 +} + +trap on_err EXIT + +token="XXX" +checkout "${resource}" "${seconds}" token + +trap - EXIT +echo "${token}" diff --git a/scripts/embed-initrd.sh b/scripts/embed-initrd.sh new file mode 100755 index 00000000..5540f567 --- /dev/null +++ b/scripts/embed-initrd.sh @@ -0,0 +1,181 @@ +#!/usr/bin/env bash + +# TODO: Not working yet. +# +# arm64 variables of interest: +# ffff0000111a8120 g O .init.data 0000000000000008 phys_initrd_start +# ffff0000111a8128 g O .init.data 0000000000000008 phys_initrd_size +# ffff00001122d1f0 g .init.data 0000000000000000 __initramfs_start +# ffff00001eb61708 g .init.data 0000000000000000 __initramfs_size +# ffff00001ed21070 g O .bss 0000000000000008 initrd_end +# ffff00001ed21078 g O .bss 0000000000000008 initrd_start + +usage() { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "${script_name} - Embed initrd into kernel (work in progress)." >&2 + echo "Usage: ${script_name} [flags]" >&2 + echo "Option flags:" >&2 + echo " -h --help - Show this help and exit." >&2 + echo " -v --verbose - Verbose execution." >&2 + echo " --arch - Target architecture. Default: '${target_arch}'." >&2 + echo " --kernel - Kernel image. Default: '${kernel}'." >&2 + echo " --initrd - Initrd image. Default: '${initrd}'." >&2 + echo " --out-file - Output image. Default: '${out_file}'." 
>&2 + eval "${old_xtrace}" +} + +process_opts() { + local short_opts="hv" + local long_opts="help,verbose,arch:,kernel:,initrd:,out-file:" + + local opts + opts=$(getopt --options ${short_opts} --long ${long_opts} -n "${script_name}" -- "$@") + + eval set -- "${opts}" + + while true ; do + case "${1}" in + -h | --help) + usage=1 + shift + ;; + -v | --verbose) + set -x + verbose=1 + shift + ;; + --arch) + target_arch=$(get_arch "${2}") + shift 2 + ;; + --kernel) + kernel="${2}" + shift 2 + ;; + --initrd) + initrd="${2}" + shift 2 + ;; + --out-file) + out_file="${2}" + shift 2 + ;; + --) + shift + break + ;; + *) + echo "${script_name}: ERROR: Internal opts: '${@}'" >&2 + exit 1 + ;; + esac + done +} + +on_exit() { + local result=${1} + + if [[ -d ${tmp_dir} ]]; then + rm -rf ${tmp_dir} + fi + + echo "${script_name}: ${result}" >&2 +} + +embed_initrd() { + local dir=${initrd%/*} + local in_file=${initrd##*/} + + rm -f ${out_file} + + pushd ${dir} + ${target_tool_prefix}objcopy \ + -I binary \ + -O ${target_bfdname} \ + -B ${target_arch} \ +- -N initramfs_start \ +- -N initramfs_size \ + --redefine-sym phys_initrd_start=.init.ramfs \ + --redefine-sym phys_initrd_size=.init.ramfs + ??? \ + ${in_file} ${initrd_elf} + popd + + ${target_tool_prefix}objcopy \ + -I ${target_bfdname} \ + -O ${target_bfdname} \ + -R .init.ramfs \ + --add-section .init.ramfs=${initrd_elf} \ + ${kernel} ${out_file} +} + +#=============================================================================== +# program start +#=============================================================================== + +set -e + +script_name="${0##*/}" + +SCRIPTS_TOP=${SCRIPTS_TOP:-"$( cd "${BASH_SOURCE%/*}" && pwd )"} + +source ${SCRIPTS_TOP}/lib/util.sh + +trap "on_exit 'failed.'" EXIT + +process_opts "${@}" + +host_arch=$(get_arch "$(uname -m)") + +target_arch=${target_arch:-"${host_arch}"} + +if [[ ! ${out_file} ]]; then + out_file="${kernel}.embedded" +fi + +if [[ -n "${usage}" ]]; then + usage + trap "on_exit 'Done, success.'" EXIT + exit 0 +fi + +case ${target_arch} in +arm64|aarch64) + target_arch="aarch64" + target_bfdname="elf64-littleaarch64" + target_tool_prefix=${target_tool_prefix:-"aarch64-linux-gnu-"} + ;; +amd64|x86_64) + target_arch="x86_64" + target_tool_prefix=${target_tool_prefix:-"x86_64-linux-gnu-"} + echo "${script_name}: ERROR: Unsupported target arch '${target_arch}'. Must be arm64." >&2 + exit 1 + ;; +*) + echo "${script_name}: ERROR: Unsupported target arch '${target_arch}'. Must be arm64." >&2 + exit 1 + ;; +esac + + +check_opt 'kernel' ${kernel} +check_file "${kernel}" + +check_opt 'initrd' ${initrd} +check_file "${initrd}" + +tmp_dir="$(mktemp --tmpdir --directory ${script_name}.XXXX)" +initrd_elf="${tmp_dir}/initrd.elf" + +embed_initrd + +${target_tool_prefix}objcopy -O binary -R .note -R .note.gnu.build-id -R .comment -S ${out_file} ${out_file}.Image + +${target_tool_prefix}objdump --syms ${kernel} > ${out_file}.orig.syms +${target_tool_prefix}objdump --syms ${initrd_elf} > ${out_file}.initrd.syms +${target_tool_prefix}objdump --syms ${out_file} > ${out_file}.syms + +echo "${script_name}: INFO: Output file: '${out_file}'." 
>&2 +trap "on_exit 'Done, success.'" EXIT +exit 0 diff --git a/scripts/enter-tdd-jenkins-service.sh b/scripts/enter-tdd-jenkins-service.sh new file mode 100755 index 00000000..2a9bbfd9 --- /dev/null +++ b/scripts/enter-tdd-jenkins-service.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash + +set -e + +script_name="${0##*/}" + +usage () { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "${script_name} - Enter tdd-jenkins.server container." >&2 + echo "Usage: ${script_name} [flags]" >&2 + echo "Option flags:" >&2 + echo " -c --clean - Clean kernel rootfs files." >&2 + echo " -h --help - Show this help and exit." >&2 + echo " -v --verbose - Verbose execution." >&2 + eval "${old_xtrace}" +} + +short_opts="chv" +long_opts="clean,help,verbose" + +opts=$(getopt --options ${short_opts} --long ${long_opts} -n "${script_name}" -- "$@") + +if [ $? != 0 ]; then + echo "${script_name}: ERROR: Internal getopt" >&2 + exit 1 +fi + +eval set -- "${opts}" + +while true ; do + case "${1}" in + -c | --clean) + clean=1 + shift + ;; + -h | --help) + usage=1 + shift + ;; + -v | --verbose) + set -x + verbose=1 + export PS4='\[\e[0;33m\]+ ${BASH_SOURCE##*/}:${LINENO}:(${FUNCNAME[0]:-"?"}):\[\e[0m\] ' + shift + ;; + --) + shift + break + ;; + *) + echo "${script_name}: ERROR: Internal opts: '${@}'" >&2 + exit 1 + ;; + esac +done + +if [[ ${usage} ]]; then + usage + exit 0 +fi + +kernel_rootfs=/var/jenkins_home/workspace/tdd/kernel/kernel-test/arm64-debian-buster.rootfs + +if [[ ${clean} ]]; then + exec docker exec --privileged tdd-jenkins.service sudo rm -rf ${kernel_rootfs} +else + exec docker exec -it --privileged tdd-jenkins.service /bin/bash +fi + diff --git a/scripts/lib/checkout.sh b/scripts/lib/checkout.sh new file mode 100644 index 00000000..094133ab --- /dev/null +++ b/scripts/lib/checkout.sh @@ -0,0 +1,102 @@ +#!/usr/bin/env bash + +# TDD checkout client library routines. + +checkout_split_reply() { + local reply=${1} + local -n _checkout_split_reply__cmd=${2} + local -n _checkout_split_reply__data=${3} + + _checkout_split_reply__cmd="$(echo ${reply} | cut -d ':' -f 1)" + _checkout_split_reply__data="$(echo ${reply} | cut -d ':' -f 2)" +} + +checkout_at_server() { + local server=${1} + local port=${2} + local resource=${3} + local seconds=${4} + local -n _checkout_at_server__token=${5} + + set +e + local reply_msg + reply_msg="$(echo -n "CKO:${resource}:${seconds}" | netcat ${server} ${port})" + local reply_result=${?} + set -e + + if [[ ${reply_result} -ne 0 ]]; then + echo "${script_name}: checkout_at_server failed: command failed: ${reply_result}" >&2 + return ${reply_result} + fi + + echo "${script_name}: reply_msg='${reply_msg}'" >&2 + + if [[ ! ${reply_msg} ]]; then + echo "${script_name}: checkout_at_server failed: no reply." >&2 + return -1 + fi + + local cmd + local data + checkout_split_reply ${reply_msg} cmd data + + if [[ ${cmd} == "ERR" ]]; then + echo "${script_name}: checkout_at_server failed: ${reply_msg}" >&2 + return -1 + fi + + if [[ ! 
${data} ]]; then + echo "${script_name}: checkout_at_server failed: no data" >&2 + return -2 + fi + + _checkout_at_server__token="${data}" +} + +checkout() { + local resource=${1} + local seconds=${2} + local -n _checkout__token=${3} + + checkout_at_server ${TDD_CHECKOUT_SERVER} ${TDD_CHECKOUT_PORT} ${resource} ${seconds} _checkout__token +} + +checkout_query() { + local resource=${1} + local -n _checkout_query__seconds=${2} + + echo "${script_name}: WARNING: checkout_query: TODO" >&2 + return 0 +} + +checkin_at_server() { + local server=${1} + local port=${2} + local token=${3} + + set +e + local reply_msg + reply_msg="$(echo -n "CKI:${token}" | netcat ${server} ${port})" + local reply_result=${?} + set -e + + echo "${script_name}: reply_msg='${reply_msg}'" >&2 + + local cmd + local data + checkout_split_reply ${reply_msg} cmd data + + if [[ ${cmd} == "ERR" ]]; then + echo "${script_name}: checkin_at_server failed: ${reply_msg}" >&2 + return -1 + fi +} + +checkin() { + local token=${1} + + checkin_at_server ${TDD_CHECKOUT_SERVER} ${TDD_CHECKOUT_PORT} ${token} +} + +TDD_CHECKOUT_SERVER=${TDD_CHECKOUT_SERVER:-${TDD_RELAY_SERVER:-"tdd-relay"}} +TDD_CHECKOUT_PORT=${TDD_CHECKOUT_PORT:-${TDD_RELAY_PORT:-"9600"}} diff --git a/scripts/lib/chroot.sh b/scripts/lib/chroot.sh new file mode 100644 index 00000000..739c6c48 --- /dev/null +++ b/scripts/lib/chroot.sh @@ -0,0 +1,86 @@ +#!/usr/bin/env bash + +get_qemu_static() { + local qemu_static + + case "${host_arch}--${target_arch}" in + amd64--arm64) + qemu_static="/usr/bin/qemu-aarch64-static" + ;; + amd64--ppc32) + qemu_static="/usr/bin/qemu-ppc-static" + ;; + amd64--ppc64) + qemu_static="/usr/bin/qemu-ppc64-static" + ;; + arm64--amd64) + qemu_static="/usr/bin/qemu-x86_64-static" + ;; + *) + echo "${script_name}: ERROR: Unsupported host--target combo: '${"${host_arch}--${target_arch}"}'." >&2 + exit 1 + ;; + esac + + if ! test -x "$(command -v ${qemu_static})"; then + echo "${script_name}: ERROR: Please install QEMU user emulation '${qemu_static}'." >&2 + exit 1 + fi + + echo "${qemu_static}" +} + +clean_qemu_static() { + local chroot=${1} + local qemu_static + + if [ "${host_arch}" != "${target_arch}" ]; then + qemu_static="$(get_qemu_static)" + ${sudo} rm -f ${chroot}${qemu_static} + fi +} + +copy_qemu_static() { + local chroot=${1} + local qemu_static + + if [ "${host_arch}" != "${target_arch}" ]; then + qemu_static="$(get_qemu_static)" + ${sudo} cp -f ${qemu_static} ${chroot}${qemu_static} + fi +} + + +enter_chroot() { + local chroot=${1} + shift + local script=${@} + + check_directory ${chroot} + copy_qemu_static ${chroot} + + ${sudo} mount -l -t proc + ${sudo} umount ${chroot}/proc || : + + mkdir -p ${chroot}/proc ${chroot}/sys ${chroot}/dev ${chroot}/run + + ${sudo} mount -t proc -o nosuid,nodev,noexec /proc ${chroot}/proc + ${sudo} mount --rbind /sys ${chroot}/sys + ${sudo} mount --rbind /dev ${chroot}/dev + ${sudo} mount --rbind /run ${chroot}/run + + ${sudo} LANG=C.UTF-8 PS4="+ chroot: " chroot ${chroot} /bin/sh -x < /dev/null || : + + mount | egrep ${chroot} || : +} diff --git a/scripts/lib/ipmi.sh b/scripts/lib/ipmi.sh new file mode 100644 index 00000000..eec1fff9 --- /dev/null +++ b/scripts/lib/ipmi.sh @@ -0,0 +1,68 @@ +#!/usr/bin/env bash + +# ipmi library routines. 
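+#
+# The ipmi_args string taken by these helpers is passed straight through to
+# ipmitool, so it carries whatever connection options the target BMC needs.
+# Illustrative example (the host and credentials are placeholders, not values
+# from this repo):
+#   ipmi_power_on '-I lanplus -H bmc-hostname -U admin -P secret'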
+ +ipmi_get_power_status() { + local ipmi_args="${1}" + + #echo "*** ipmi_args = @${ipmi_args} @" >&2 + local msg + msg="$(ipmitool ${ipmi_args} power status)" + + case ${msg: -3} in + ' on') + echo 'on' + ;; + 'off') + echo 'off' + ;; + *) + echo "${script_name}: ERROR: Bad ipmi message '${msg}'" >&2 + exit 1 + ;; + esac +} + +ipmi_wait_power_state() { + local ipmi_args="${1}" + local state="${2}" + local timeout_sec=${3} + timeout_sec=${timeout_sec:-60} + + #echo "*** ipmi_args = @${ipmi_args} @" >&2 + + let count=1 + while [[ $(ipmi_get_power_status "${ipmi_args}") != "${state}" ]]; do + let count=count+5 + if [[ count -gt ${timeout_sec} ]]; then + echo "${script_name}: ipmi_wait_power_state '${state}' ${ipmi_args} failed." + exit -1 + fi + sleep 5s + done +} + +ipmi_set_power_state() { + local ipmi_args="${1}" + local state="${2}" + local timeout_sec=${3} + + #echo "*** ipmi_args = @${ipmi_args} @" >&2 + + ipmitool ${ipmi_args} power ${state} + ipmi_wait_power_state "${ipmi_args}" "${state}" ${timeout_sec} +} + +ipmi_power_on() { + local ipmi_args="${1}" + + #echo "*** ipmi_args = @${ipmi_args} @" >&2 + ipmi_set_power_state "${ipmi_args}" 'on' +} + +ipmi_power_off() { + local ipmi_args="${1}" + + #echo "*** ipmi_args = @${ipmi_args} @" >&2 + ipmi_set_power_state "${ipmi_args}" 'off' +} diff --git a/scripts/lib/relay.sh b/scripts/lib/relay.sh new file mode 100644 index 00000000..ec6d8f4d --- /dev/null +++ b/scripts/lib/relay.sh @@ -0,0 +1,180 @@ +#!/usr/bin/env bash + +# TDD relay client library routines. + +relay_double_to_server() { + echo ${1} | cut -d ':' -f 1 +} + +relay_double_to_port() { + echo ${1} | cut -d ':' -f 2 +} + +relay_test_triple() { + [[ ${1} =~ .:[[:digit:]]{3,5}:. ]] +} + +relay_verify_triple() { + local triple=${1} + + if ! 
relay_test_triple ${triple}; then + echo "${script_name}: ERROR: Bad triple: '${triple}'" >&2 + exit 1 + fi +} + +relay_random_token() { + echo "$(cat /proc/sys/kernel/random/uuid)" +} + +relay_make_random_triple() { + local server=${1} + local port=${2} + + server=${server:-${TDD_RELAY_SERVER}} + port=${port:-${TDD_RELAY_PORT}} + + echo "${server}:${port}:$(relay_random_token)" +} + +relay_triple_to_server() { + local triple=${1} + + relay_verify_triple ${triple} + echo ${triple} | cut -d ':' -f 1 +} + +relay_triple_to_port() { + local triple=${1} + + relay_verify_triple ${triple} + echo ${triple} | cut -d ':' -f 2 +} + +relay_triple_to_token() { + local triple=${1} + + relay_verify_triple ${triple} + echo ${triple} | cut -d ':' -f 3 +} + +relay_split_triple() { + local triple=${1} + local -n _relay_split_triple__server=${2} + local -n _relay_split_triple__port=${3} + local -n _relay_split_triple__token=${4} + + relay_verify_triple ${triple} + + _relay_split_triple__server="$(echo ${triple} | cut -d ':' -f 1)" + _relay_split_triple__port="$(echo ${triple} | cut -d ':' -f 2)" + _relay_split_triple__token="$(echo ${triple} | cut -d ':' -f 3)" +} + +relay_resolve_triple() { + local triple=${1} + local server port token addr + + relay_split_triple ${triple} server port token + + find_addr addr "/etc/hosts" ${server} + + echo "${addr}:${port}:${token}" +} + +relay_init_triple() { + local server=${1} + local port addr token triple + + if [[ ${server} ]]; then + port=$(relay_double_to_port ${server}) + server=$(relay_double_to_server ${server}) + else + port=${TDD_RELAY_PORT} + server=${TDD_RELAY_SERVER} + fi + + find_addr addr "/etc/hosts" ${server} + token=$(relay_random_token) + triple="${addr}:${port}:${token}" + + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "relay_triple: ${triple}" >&2 + echo " relay_server: ${server}" >&2 + echo " relay_addr: ${addr}" >&2 + echo " relay_port: ${port}" >&2 + echo " relay_token: ${token}" >&2 + eval "${old_xtrace}" + + echo "${triple}" +} + +relay_split_reply() { + local reply=${1} + local -n _relay_split_reply__cmd=${2} + local -n _relay_split_reply__data=${3} + + _relay_split_reply__cmd="$(echo ${reply} | cut -d ':' -f 1)" + _relay_split_reply__data="$(echo ${reply} | cut -d ':' -f 2)" +} + +relay_get() { + local timeout=${1} + local triple=${2} + local -n _relay_get__remote_addr=${3} + + local server + local port + local token + relay_split_triple ${triple} server port token + + echo "${script_name}: relay client: Waiting ${timeout}s for msg at ${server}:${port}..." >&2 + + SECONDS=0 + local reply_msg + local reply_result + + #timeout="3s" # For debug. + set +e + reply_msg="$(echo -n "GET:${token}" | netcat -w${timeout} ${server} ${port})" + reply_result=${?} + set -e + + local boot_time="$(sec_to_min ${SECONDS})" + + echo "${script_name}: reply_result='${reply_result}'" >&2 + echo "${script_name}: reply_msg='${reply_msg}'" >&2 + + if [[ ${reply_result} -eq 1 ]]; then + echo "${script_name}: relay GET ${server} failed (${reply_result}): Host unreachable? Server down?" >&2 + ping -c 1 -n ${server} + return 1 + fi + + if [[ ${reply_result} -eq 124 || ! ${reply_msg} ]]; then + echo "${script_name}: relay GET ${server} failed (${reply_result}): Timed out ${timeout}." >&2 + return 1 + fi + + if [[ ${reply_result} -ne 0 ]]; then + echo "${script_name}: relay GET ${server} failed (${reply_result})." 
>&2 + return ${reply_result} + fi + + echo "${script_name}: reply_msg='${reply_msg}'" >&2 + local cmd + relay_split_reply ${reply_msg} cmd _relay_get__remote_addr + + if [[ "${cmd}" != 'OK-' ]]; then + echo "${script_name}: relay_get failed: ${reply_msg}" >&2 + _relay_get__remote_addr="server-error" + return 1 + fi + + echo "${script_name}: Received msg from '${_relay_get__remote_addr}" >&2 + echo "${script_name}: ${_relay_get__remote_addr} boot time = ${boot_time} min" >&2 +} + +TDD_RELAY_SERVER=${TDD_RELAY_SERVER:-"tdd-relay"} +TDD_RELAY_PORT=${TDD_RELAY_PORT:-"9600"} diff --git a/scripts/lib/toolchain.sh b/scripts/lib/toolchain.sh new file mode 100644 index 00000000..7a2d0b6f --- /dev/null +++ b/scripts/lib/toolchain.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +print_gcc_info() { + local gcc=${1} + local log_file=${2:-"/dev/null"} + + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set -o xtrace + local old_errexit="$(shopt -po errexit || :)" + set +o errexit + + echo "=============================" | tee --append ${log_file} + echo "${gcc} --version" | tee --append ${log_file} + ${gcc} --version 2>&1 | tee --append ${log_file} + echo "-----------------------------" | tee --append ${log_file} + echo "${gcc} -dumpspecs" | tee --append ${log_file} + ${gcc} -dumpspecs 2>&1 | tee --append ${log_file} + echo "-----------------------------" | tee --append ${log_file} + echo "${gcc} -dumpversion" | tee --append ${log_file} + ${gcc} -dumpversion 2>&1 | tee --append ${log_file} + echo "-----------------------------" | tee --append ${log_file} + echo "${gcc} -dumpmachine" | tee --append ${log_file} + ${gcc} -dumpmachine 2>&1 | tee --append ${log_file} + echo "-----------------------------" | tee --append ${log_file} + echo "${gcc} -print-search-dirs" | tee --append ${log_file} + ${gcc} -print-search-dirs 2>&1 | tee --append ${log_file} + echo "-----------------------------" | tee --append ${log_file} + echo "${gcc} -print-libgcc-file-name" | tee --append ${log_file} + ${gcc} -print-libgcc-file-name 2>&1 | tee --append ${log_file} + echo "-----------------------------" | tee --append ${log_file} + echo "${gcc} -print-multiarch" | tee --append ${log_file} + ${gcc} -print-multiarch 2>&1 | tee --append ${log_file} + echo "-----------------------------" | tee --append ${log_file} + echo "${gcc} -print-multi-directory" | tee --append ${log_file} + ${gcc} -print-multi-directory 2>&1 | tee --append ${log_file} + echo "-----------------------------" | tee --append ${log_file} + echo "${gcc} -print-multi-lib" | tee --append ${log_file} + ${gcc} -print-multi-lib 2>&1 | tee --append ${log_file} + echo "-----------------------------" | tee --append ${log_file} + echo "${gcc} -print-multi-os-directory" | tee --append ${log_file} + ${gcc} -print-multi-os-directory 2>&1 | tee --append ${log_file} + echo "-----------------------------" | tee --append ${log_file} + echo "${gcc} -print-sysroot" | tee --append ${log_file} + ${gcc} -print-sysroot 2>&1 | tee --append ${log_file} + echo "-----------------------------" | tee --append ${log_file} + echo "${gcc} -print-sysroot-headers-suffix" | tee --append ${log_file} + ${gcc} -print-sysroot-headers-suffix 2>&1 | tee --append ${log_file} + echo -e "=============================" | tee --append ${log_file} + + eval "${old_errexit}" + eval "${old_xtrace}" +} diff --git a/scripts/lib/util.sh b/scripts/lib/util.sh new file mode 100644 index 00000000..280730c6 --- /dev/null +++ b/scripts/lib/util.sh @@ -0,0 +1,461 @@ +#!/usr/bin/env bash + +clean_ws() { + local 
in="$*" + + shopt -s extglob + in="${in//+( )/ }" in="${in# }" in="${in% }" + echo -n "$in" +} + +substring_has() { + local string=${1} + local substring=${2} + + [ -z "${string##*${substring}*}" ]; +} + +substring_begins() { + local string=${1} + local substring=${2} + + [ -z "${string##${substring}*}" ]; +} + +substring_ends() { + local string=${1} + local substring=${2} + + [ -z "${string##*${substring}}" ]; +} + +sec_to_min() { + local sec=${1} + local min=$((sec / 60)) + local frac_10=$(((sec - min * 60) * 10 / 60)) + local frac_100=$(((sec - min * 60) * 100 / 60)) + + if ((frac_10 != 0)); then + unset frac_10 + fi + + echo "${min}.${frac_10}${frac_100}" +} + +test_sec_to_min() { + local start=${1:-1} + local end=${2:-100} + local enc=${3:-1} + + for ((sec = start; sec <= end; sec += enc)); do + echo "${sec} sec = $(sec_to_min ${sec}) ($(echo "scale=2; ${sec}/60" | bc -l | sed 's/^\./0./')) min" >&2 + done +} + +directory_size_bytes() { + local dir=${1} + + local size + size="$(du -sb "${dir}")" + echo "${size%%[[:space:]]*}" +} + +directory_size_human() { + local dir=${1} + + local size + size="$(du -sh "${dir}")" + echo "${size%%[[:space:]]*}" +} + +check_directory() { + local src="${1}" + local msg="${2}" + local usage="${3}" + + if [[ ! -d "${src}" ]]; then + echo "${script_name}: ERROR (${FUNCNAME[0]}): Directory not found${msg}: '${src}'" >&2 + [[ -z "${usage}" ]] || usage + exit 1 + fi +} + +check_file() { + local src="${1}" + local msg="${2}" + local usage="${3}" + + if [[ ! -f "${src}" ]]; then + echo -e "${script_name}: ERROR: File not found${msg}: '${src}'" >&2 + [[ -z "${usage}" ]] || usage + exit 1 + fi +} + +check_opt() { + option=${1} + shift + value="${*}" + + if [[ ! ${value} ]]; then + echo "${script_name}: ERROR (${FUNCNAME[0]}): Must provide --${option} option." >&2 + usage + exit 1 + fi +} + +check_not_opt() { + option1=${1} + option2=${2} + shift 2 + value2="${*}" + + if [[ ${value2} ]]; then + echo "${script_name}: ERROR (${FUNCNAME[0]}): Can't use --${option2} with --${option1}." >&2 + usage + exit 1 + fi +} + +find_common_parent() { + local dir1 + dir1="$(realpath -m "${1}")" + local dir2 + dir2="$(realpath -m "${2}")" + local A1 + local A2 + local sub + + IFS="/" read -ra A1 <<< "${dir1}" + IFS="/" read -ra A2 <<< "${dir2}" + + #echo "array len = ${#A1[@]}" >&2 + + for ((i = 0; i < ${#A1[@]}; i++)); do + echo "${i}: @${A1[i]}@ @${A2[i]}@" >&2 + if [[ "${A1[i]}" != "${A2[i]}" ]]; then + break; + fi + sub+="${A1[i]}/" + done + + #echo "sub = @${sub}@" >&2 + echo "${sub}" +} + +relative_path_2() { + local base="${1}" + local target="${2}" + local root="${3}" + + base="${base##${root}}" + base="${base%%/}" + base=${base%/*} + target="${target%%/}" + + local back="" + while :; do + set +x + echo "target: ${target}" >&2 + echo "base: ${base}" >&2 + echo "back: ${back}" >&2 + set -x + if [[ "${base}" == "/" || ! 
${base} ]]; then + break + fi + back+="../" + if [[ "${target}" == ${base}/* ]]; then + break + fi + base=${base%/*} + done + + echo "${back}${target##${base}/}" +} + +relative_path() { + local base="${1}" + local target="${2}" + local root="${3}" + + base="${base##${root}}" + base="${base%%/}" + base=${base%/*} + target="${target%%/}" + + local back="" + while :; do + #echo "target: ${target}" >&2 + #echo "base: ${base}" >&2 + #echo "back: ${back}" >&2 + if [[ "${base}" == "/" || "${target}" == ${base}/* ]]; then + break + fi + back+="../" + base=${base%/*} + done + + echo "${back}${target##${base}/}" +} + +copy_file() { + local src="${1}" + local dest="${2}" + + check_file "${src}" + cp -f "${src}" "${dest}" +} + +cpu_count() { + local result + + if result="$(getconf _NPROCESSORS_ONLN)"; then + echo "${result}" + else + echo "1" + fi +} + +get_user_home() { + local user=${1} + local result; + + if ! result="$(getent passwd "${user}")"; then + echo "${script_name}: ERROR (${FUNCNAME[0]}): No home for user '${user}'" >&2 + exit 1 + fi + echo "${result}" | cut -d ':' -f 6 +} + +get_arch() { + local a=${1} + + case "${a}" in + arm64|aarch64) echo "arm64" ;; + amd64|x86_64) echo "amd64" ;; + ppc|powerpc|ppc32|powerpc32) echo "ppc32" ;; + ppc64|powerpc64) echo "ppc64" ;; + ppc64le|powerpc64le) echo "ppc64le" ;; + *) + echo "${script_name}: ERROR (${FUNCNAME[0]}): Bad arch '${a}'" >&2 + exit 1 + ;; + esac +} + +get_triple() { + local a=${1} + + case "${a}" in + amd64) echo "x86_64-linux-gnu" ;; + arm64) echo "aarch64-linux-gnu" ;; + ppc32) echo "powerpc-linux-gnu" ;; + ppc64) echo "powerpc64-linux-gnu" ;; + ppc64le) echo "powerpc64le-linux-gnu" ;; + *) + echo "${script_name}: ERROR (${FUNCNAME[0]}): Bad arch '${a}'" >&2 + exit 1 + ;; + esac +} + +kernel_arch() { + local a=${1} + + case "${a}" in + amd64) echo "x86_64" ;; + arm64*) echo "arm64" ;; + ppc*) echo "powerpc" ;; + *) + echo "${script_name}: ERROR (${FUNCNAME[0]}): Bad arch '${a}'" >&2 + exit 1 + ;; + esac +} + +sudo_write() { + sudo tee "${1}" >/dev/null +} + +sudo_append() { + sudo tee -a "${1}" >/dev/null +} + +is_ip_addr() { + local host=${1} + local regex_ip="[[:digit:]]{1,3}\.[[:digit:]]{1,3}{3}" + + [[ "${host}" =~ ${regex_ip} ]] +} + +find_addr() { + local -n _find_addr__addr=${1} + local hosts_file=${2} + local host=${3} + + _find_addr__addr="" + + if is_ip_addr "${host}"; then + _find_addr__addr="${host}" + return + fi + + if [[ ! -x "$(command -v dig)" ]]; then + echo "${script_name}: WARNING: Please install dig (dnsutils)." >&2 + else + _find_addr__addr="$(dig "${host}" +short)" + fi + + if [[ ! ${_find_addr__addr} ]]; then + _find_addr__addr="$(grep -E -m 1 "${host}[[:space:]]*$" "${hosts_file}" \ + | grep -E -o '\b([0-9]{1,3}\.){3}[0-9]{1,3}\b' || :)" + + if [[ ! ${_find_addr__addr} ]]; then + echo "${script_name}: ERROR (${FUNCNAME[0]}): '${host}' DNS entry not found." >&2 + exit 1 + fi + fi +} + +my_addr() { + ip route get 8.8.8.8 | grep -E -o 'src [0-9.]*' | cut -f 2 -d ' ' +} + +wait_pid() { + local pid="${1}" + local timeout_sec=${2} + timeout_sec=${timeout_sec:-300} + + echo "${script_name}: INFO: Waiting ${timeout_sec}s for pid ${pid}." >&2 + + local count=1 + while kill -0 "${pid}" &> /dev/null; do + ((count = count + 5)) + if [[ count -gt ${timeout_sec} ]]; then + echo "${script_name}: ERROR (${FUNCNAME[0]}): wait_pid failed for pid ${pid}." 
>&2 + exit 2 + fi + sleep 5s + done +} + +git_get_repo_name() { + local repo=${1} + + if [[ "${repo: -1}" == "/" ]]; then + repo=${repo:0:-1} + fi + + local repo_name="${repo##*/}" + + if [[ "${repo_name:0:1}" == "." ]]; then + repo_name="${repo%/.*}" + repo_name="${repo_name##*/}" + echo "${repo_name}" + return + fi + + repo_name="${repo_name%.*}" + + if [[ -z "${repo_name}" ]]; then + echo "${script_name}: ERROR (${FUNCNAME[0]}): Bad repo: '${repo}'" >&2 + exit 1 + fi + + echo "${repo_name}" +} + +git_set_remote() { + local dir=${1} + local repo=${2} + local remote + + remote="$(git -C "${dir}" remote -v | grep -E --max-count=1 'origin' | cut -f2 | cut -d ' ' -f1)" + + if ! remote="$(git -C "${dir}" remote -v | grep -E --max-count=1 'origin' | cut -f2 | cut -d ' ' -f1)"; then + echo "${script_name}: ERROR (${FUNCNAME[0]}): Bad git repo ${dir}." >&2 + exit 1 + fi + + if [[ "${remote}" != "${repo}" ]]; then + echo "${script_name}: INFO: Switching git remote '${remote}' => '${repo}'." >&2 + git -C "${dir}" remote set-url origin "${repo}" + git -C "${dir}" remote -v + fi +} + +git_checkout_force() { + local dir=${1} + local repo=${2} + local branch=${3:-'master'} + + if [[ ! -d "${dir}" ]]; then + mkdir -p "${dir}/.." + git clone "${repo}" "${dir}" + fi + + git_set_remote "${dir}" "${repo}" + + git -C "${dir}" checkout -- . + git -C "${dir}" remote update -p + git -C "${dir}" reset --hard origin/"${branch}" + git -C "${dir}" checkout --force "${branch}" + git -C "${dir}" pull "${repo}" "${branch}" + git -C "${dir}" status +} + +git_checkout_safe() { + local dir=${1} + local repo=${2} + local branch=${3:-'master'} + + if [[ -e "${dir}" ]]; then + if [[ ! -e "${dir}/.git/config" ]]; then + mv "${dir}" "${dir}.backup-$(date +%Y.%m.%d-%H.%M.%S)" + elif ! git -C "${dir}" status --porcelain; then + echo "${script_name}: INFO: Local changes: ${dir}." >&2 + cp -a --link "${dir}" "${dir}.backup-$(date +%Y.%m.%d-%H.%M.%S)" + fi + fi + + git_checkout_force "${dir}" "${repo}" "${branch}" +} + +run_shellcheck() { + local file=${1} + + shellcheck=${shellcheck:-"shellcheck"} + + if ! test -x "$(command -v "${shellcheck}")"; then + echo "${script_name}: ERROR: Please install '${shellcheck}'." >&2 + exit 1 + fi + + ${shellcheck} "${file}" +} + +get_container_id() { + local cpuset + cpuset="$(cat /proc/1/cpuset)" + local regex="^/docker/([[:xdigit:]]*)$" + local container_id + + if [[ "${cpuset}" =~ ${regex} ]]; then + container_id="${BASH_REMATCH[1]}" + echo "${script_name}: INFO: Container ID '${container_id}'." >&2 + else + echo "${script_name}: WARNING: Container ID not found." >&2 + fi + + echo "${container_id}" +} + +if [[ ${PS4} == '+ ' ]]; then + if [[ ${JENKINS_URL} ]]; then + export PS4='+ [${STAGE_NAME}] \${BASH_SOURCE##*/}:\${LINENO}: ' + else + export PS4='\[\e[0;33m\]+ ${BASH_SOURCE##*/}:${LINENO}:(${FUNCNAME[0]:-"?"}):\[\e[0m\] ' + fi +fi + +script_name="${script_name:-${0##*/}}" diff --git a/scripts/prepare-ld-scripts.sh b/scripts/prepare-ld-scripts.sh new file mode 100755 index 00000000..e9329ef4 --- /dev/null +++ b/scripts/prepare-ld-scripts.sh @@ -0,0 +1,147 @@ +#!/usr/bin/env bash + +usage () { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "${script_name} - Convert ld scripts to relative or absolute paths." >&2 + echo "Usage: ${script_name} [flags]" >&2 + echo "Option flags:" >&2 + echo " -a --absolute - Convert to absolute paths. Default: '${absolute}'." >&2 + echo " -d --dry-run - Do not execute commands. Default: '${dry_run}'." 
>&2 + echo " -h --help - Show this help and exit." >&2 + echo " -r --root-dir - Root of file system. Default: '${root_dir}'." >&2 + echo " -s --start-dir - Top of directory tree to convert. Default: '${start_dir}'." >&2 + echo " -v --verbose - Verbose execution." >&2 + eval "${old_xtrace}" +} + +process_opts() { + local short_opts="adhr:s:v" + local long_opts="absolute,dry-run,help,root-dir:,start-dir:,verbose" + + local opts + opts=$(getopt --options ${short_opts} --long ${long_opts} -n "${script_name}" -- "$@") + + eval set -- "${opts}" + + while true ; do + #echo "${FUNCNAME[0]}: @${1}@ @${2}@" + case "${1}" in + -a | --absolute) + absolute=1 + shift + ;; + -d | --dry-run) + dry_run=1 + shift + ;; + -h | --help) + usage=1 + shift + ;; + -r | --root-dir) + root_dir="${2}" + shift 2 + ;; + -s | --start-dir) + start_dir="${2}" + shift 2 + ;; + -v | --verbose) + set -x + verbose=1 + shift + ;; + --) + shift + break + ;; + *) + echo "${script_name}: ERROR: Internal opts: '${@}'" >&2 + exit 1 + ;; + esac + done +} + +on_exit() { + local result=${1} + + if [[ -d ${tmp_dir} ]]; then + rm -rf ${tmp_dir} + fi + + set +x + echo "${script_name}: Done: ${result}" >&2 +} + + +#=============================================================================== +# program start +#=============================================================================== +export PS4='\[\e[0;33m\]+ ${BASH_SOURCE##*/}:${LINENO}:(${FUNCNAME[0]:-"?"}):\[\e[0m\] ' +script_name="${0##*/}" + +trap "on_exit 'failed.'" EXIT +set -e + +SCRIPTS_TOP=${SCRIPTS_TOP:-"$(cd "${BASH_SOURCE%/*}" && pwd)"} +source ${SCRIPTS_TOP}/lib/util.sh + +process_opts "${@}" + +if [[ ${usage} ]]; then + usage + trap - EXIT + exit 0 +fi + +check_opt 'root-dir' "${root_dir}" +check_directory "${root_dir}" "" "usage" +root_dir="$(realpath ${root_dir})" + +check_opt 'start-dir' "${start_dir}" +check_directory "${start_dir}" "" "usage" +start_dir="$(realpath ${start_dir})" + +tmp_dir="$(mktemp --tmpdir --directory ${script_name}.XXXX)" + +# FIXME: Need to fixup /etc/ld.so.conf? + +files=$(find "${start_dir}" -name '*.so') + +for file in ${files}; do + if [[ "$(file -b ${file})" != "ASCII text" ]]; then + continue + fi + + echo "${script_name}: ${file}" >&2 + + while read -r line_in; do + if [[ "${line_in:0:5}" != "GROUP" ]]; then + echo "${line_in}" >> ${tmp_dir}/1 + else + line_out="" + for w in ${line_in}; do + if [[ ${w} != *"/lib/"* ]]; then + line_out+="${w} " + else + if [[ ${absolute} ]]; then + line_out+="${root_dir}${w} " + else + line_out+="$(relative_path ${file} ${w} ${root_dir}) " + fi + fi + done + echo "${script_name}: in: ${line_in}" >&2 + echo "${script_name}: out: ${line_out}" >&2 + echo "${line_out}" >> ${tmp_dir}/1 + fi + done < "${file}" + + #cat ${tmp_dir}/1 + cp -f ${tmp_dir}/1 ${file} +done + +trap "on_exit 'Success.'" EXIT diff --git a/scripts/prepare-sysroot.sh b/scripts/prepare-sysroot.sh new file mode 100755 index 00000000..633bd6e3 --- /dev/null +++ b/scripts/prepare-sysroot.sh @@ -0,0 +1,121 @@ +#!/usr/bin/env bash + +usage () { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "${script_name} - Convert sysroot to relative or absolute paths." >&2 + echo "Usage: ${script_name} [flags] " >&2 + echo "Option flags:" >&2 + echo " -a --absolute - Convert to absolute paths. Default: '${absolute}'." >&2 + echo " -d --dry-run - Do not execute commands. Default: '${dry_run}'." >&2 + echo " -h --help - Show this help and exit." >&2 + echo " -v --verbose - Verbose execution." 
>&2 + echo "Args:" >&2 + echo " - Sysroot directory. Default: '${sysroot}'." >&2 + eval "${old_xtrace}" +} + +process_opts() { + local short_opts="adhv" + local long_opts="absolute,dry-run,help,verbose" + + local opts + opts=$(getopt --options ${short_opts} --long ${long_opts} -n "${script_name}" -- "$@") + + eval set -- "${opts}" + + while true ; do + #echo "${FUNCNAME[0]}: @${1}@ @${2}@" + case "${1}" in + -a | --absolute) + absolute=1 + shift + ;; + -d | --dry-run) + dry_run=1 + shift + ;; + -h | --help) + usage=1 + shift + ;; + -v | --verbose) + set -x + verbose=1 + shift + ;; + --) + sysroot=${2} + if ! shift 2; then + echo "${script_name}: ERROR: Missing arg: ='${sysroot}'" >&2 + usage + exit 1 + fi + if [[ -n "${1}" ]]; then + echo "${script_name}: ERROR: Got extra args: '${@}'" >&2 + usage + exit 1 + fi + break + ;; + *) + echo "${script_name}: ERROR: Internal opts: '${@}'" >&2 + exit 1 + ;; + esac + done +} + +on_exit() { + local result=${1} + + set +x + echo "${script_name}: Done: ${result}" >&2 +} + + +#=============================================================================== +# program start +#=============================================================================== +export PS4='\[\e[0;33m\]+ ${BASH_SOURCE##*/}:${LINENO}:(${FUNCNAME[0]:-"?"}):\[\e[0m\] ' +set -e + +script_name="${0##*/}" +trap "on_exit 'failed.'" EXIT + +SCRIPTS_TOP=${SCRIPTS_TOP:-"$(cd "${BASH_SOURCE%/*}" && pwd)"} +source ${SCRIPTS_TOP}/lib/util.sh + +process_opts "${@}" + +if [[ ${usage} ]]; then + usage + trap - EXIT + exit 0 +fi + +check_opt 'sysroot' "${sysroot}" +check_directory "${sysroot}" "" "usage" + + +if [[ ${absolute} ]]; then + extra_args+="--absolute " +fi + +if [[ ${dry_run} ]]; then + extra_args+="--dry-run " +fi + +if [[ ${verbose} ]]; then + extra_args+="--verbose " +fi + +# FIXME: Need to fixup /etc/ld.so.conf? + +${SCRIPTS_TOP}/relink.sh ${extra_args} --root-dir=${sysroot} \ + --start-dir=${sysroot} +${SCRIPTS_TOP}/prepare-ld-scripts.sh ${extra_args} --root-dir=${sysroot} \ + --start-dir=${sysroot} + +trap "on_exit 'Success.'" EXIT diff --git a/scripts/relink.sh b/scripts/relink.sh new file mode 100755 index 00000000..86fbe37c --- /dev/null +++ b/scripts/relink.sh @@ -0,0 +1,139 @@ +#!/usr/bin/env bash + +usage () { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "${script_name} - Convert broken absolute symlinks to relative or absolute symlinks." >&2 + echo "Usage: ${script_name} [flags]" >&2 + echo "Option flags:" >&2 + echo " -a --absolute - Convert to absolute links. Default: '${absolute}'." >&2 + echo " -d --dry-run - Do not execute commands. Default: '${dry_run}'." >&2 + echo " -h --help - Show this help and exit." >&2 + echo " -r --root-dir - Root of file system. Default: '${root_dir}'." >&2 + echo " -s --start-dir - Top of directory tree to convert. Default: '${start_dir}'." >&2 + echo " -v --verbose - Verbose execution." 
>&2 + eval "${old_xtrace}" +} + +process_opts() { + local short_opts="adhr:s:v" + local long_opts="absolute,dry-run,help,root-dir:,start-dir:,verbose" + + local opts + opts=$(getopt --options ${short_opts} --long ${long_opts} -n "${script_name}" -- "$@") + + eval set -- "${opts}" + + while true ; do + #echo "${FUNCNAME[0]}: @${1}@ @${2}@" + case "${1}" in + -a | --absolute) + absolute=1 + shift + ;; + -d | --dry-run) + dry_run=1 + shift + ;; + -h | --help) + usage=1 + shift + ;; + -r | --root-dir) + root_dir="${2}" + shift 2 + ;; + -s | --start-dir) + start_dir="${2}" + shift 2 + ;; + -v | --verbose) + set -x + verbose=1 + shift + ;; + --) + shift + break + ;; + *) + echo "${script_name}: ERROR: Internal opts: '${@}'" >&2 + exit 1 + ;; + esac + done +} + +on_exit() { + local result=${1} + + set +x + echo "${script_name}: Done: ${result}" >&2 +} + + +#=============================================================================== +# program start +#=============================================================================== +export PS4='\[\e[0;33m\]+ ${BASH_SOURCE##*/}:${LINENO}:(${FUNCNAME[0]:-"?"}):\[\e[0m\] ' +set -e + +script_name="${0##*/}" +trap "on_exit 'failed.'" EXIT + +SCRIPTS_TOP=${SCRIPTS_TOP:-"$(cd "${BASH_SOURCE%/*}" && pwd)"} +source ${SCRIPTS_TOP}/lib/util.sh + +process_opts "${@}" + +if [[ ${usage} ]]; then + usage + trap - EXIT + exit 0 +fi + +check_opt 'root-dir' "${root_dir}" +check_directory "${root_dir}" "" "usage" +root_dir="$(realpath ${root_dir})" + +check_opt 'start-dir' "${start_dir}" +check_directory "${start_dir}" "" "usage" +start_dir="$(realpath ${start_dir})" + +links=$(find "${start_dir}" -xtype l) + +for link in ${links}; do + orig_target="$(realpath -m ${link})" + + if [[ "${orig_target:0:1}" != "/" ]]; then + echo "${script_name}: INFO: Not an absolute path: ${link} -> ${orig_target}" >&2 + continue + fi + #echo "${link} -> ${orig_target}" >&2 + + rel_target="$(relative_path ${link} ${orig_target} ${root_dir})" + abs_target="${root_dir}${orig_target}" + + if [[ ${verbose} || ${dry_run} ]]; then + comb_target=${link%/*}/${rel_target} + resolved_target="$(realpath -m ${comb_target})" + + echo "${link}" >&2 + echo " original: ${orig_target}" >&2 + echo " relative: ${rel_target}" >&2 + echo " absolute: ${abs_target}" >&2 + echo " resolved: ${resolved_target}" >&2 + fi + + if [[ ! ${dry_run} ]]; then + if [[ ${absolute} ]]; then + ln -sf "${abs_target}" "${link}" + else + ln -sf "${rel_target}" "${link}" + fi + ls -l "${link}" | cut -d ' ' -f 10- + fi +done + +trap "on_exit 'Success.'" EXIT diff --git a/scripts/rootfs-plugin/alpine.sh b/scripts/rootfs-plugin/alpine.sh new file mode 100644 index 00000000..735c683e --- /dev/null +++ b/scripts/rootfs-plugin/alpine.sh @@ -0,0 +1,216 @@ +# Alpine linux plug-in routines for build-rootfs.sh. + +download_minirootfs() { + local download_dir=${1} + local -n _download_minirootfs__archive_file=${3} + + unset _download_minirootfs__archive_file + + case "${target_arch}" in + amd64) alpine_arch="x86_64" ;; + arm64) alpine_arch="aarch64" ;; + *) + echo "${script_name}: ERROR: Unsupported target arch '${target_arch}'." >&2 + exit 1 + ;; + esac + local base_url="${alpine_os_mirror}/${alpine_arch}" + + mkdir -p ${download_dir} + pushd ${download_dir} + + local releases_yaml="latest-releases.yaml" + wget "${base_url}/${releases_yaml}" + + local latest + latest="$(egrep --only-matching "file: alpine-minirootfs-[0-9.]*-${alpine_arch}.tar.gz" ${releases_yaml})" + if [[ ! 
${latest} ]]; then + echo "${script_name}: ERROR: Bad releases file '${releases_yaml}'." >&2 + cat ${releases_yaml} + exit 1 + fi + latest=${latest##* } + wget "${base_url}/${latest}" + + popd + echo "${script_name}: INFO: Download '${latest}'." >&2 + _download_minirootfs__archive_file="${download_dir}/${latest}" +} + +extract_minirootfs() { + local archive=${1} + local out_dir=${2} + + mkdir -p ${out_dir} + tar -C ${out_dir} -xf ${archive} +} + +bootstrap_rootfs() { + local bootstrap_dir=${1} + + local download_dir="${tmp_dir}/downloads" + local archive_file + + ${sudo} rm -rf ${bootstrap_dir} + + download_minirootfs ${download_dir} ${alpine_os_mirror} archive_file + extract_minirootfs ${archive_file} ${bootstrap_dir} + + rm -rf ${download_dir} + + setup_resolv_conf ${bootstrap_dir} + + enter_chroot ${bootstrap_dir} " + set -e + apk update + apk upgrade + apk add openrc \ + busybox-initscripts \ + dropbear \ + dropbear-scp \ + haveged \ + net-tools \ + strace + cat /etc/os-release + apk info | sort + " + + ${sudo} ln -s /etc/init.d/{hwclock,modules,sysctl,hostname,bootmisc,syslog} \ + ${bootstrap_dir}/etc/runlevels/boot/ + ${sudo} ln -s /etc/init.d/{devfs,dmesg,mdev,hwdrivers} \ + ${bootstrap_dir}/etc/runlevels/sysinit/ + ${sudo} ln -s /etc/init.d/{networking} \ + ${bootstrap_dir}/etc/runlevels/default/ + ${sudo} ln -s /etc/init.d/{mount-ro,killprocs,savecache} \ + ${bootstrap_dir}/etc/runlevels/shutdown/ + + ${sudo} sed --in-place 's/^net.ipv4.tcp_syncookies/# net.ipv4.tcp_syncookies/' \ + ${bootstrap_dir}/etc/sysctl.d/00-alpine.conf + ${sudo} sed --in-place 's/^kernel.panic/# kernel.panic/' \ + ${bootstrap_dir}/etc/sysctl.d/00-alpine.conf +} + +setup_network() { + local rootfs=${1} + + setup_network_ifupdown ${rootfs} +} + +rootfs_cleanup() { + local rootfs=${1} + + #${sudo} rm -rf ${rootfs}/var/cache/apk +} + +setup_packages() { + local rootfs=${1} + shift 1 + local packages="${@//,/ }" + + enter_chroot ${rootfs} " + set -e + apk add ${packages} + apk add efivar-libs --repository http://dl-3.alpinelinux.org/alpine/edge/community --allow-untrusted + apk add efibootmgr --repository http://dl-3.alpinelinux.org/alpine/edge/community --allow-untrusted + apk info | sort + " + + ${sudo} ln -s /etc/init.d/{haveged,dropbear} \ + ${rootfs}/etc/runlevels/sysinit/ + + # for openrc debugging + echo 'rc_logger="YES"' | sudo_append ${rootfs}/etc/rc.conf + echo 'rc_verbose="YES"' | sudo_append ${rootfs}/etc/rc.conf +} + +setup_initrd_boot() { + local rootfs=${1} + + ln -s sbin/init ${rootfs}/init +} + +setup_login() { + local rootfs=${1} + local pw=${2} + + setup_password ${rootfs} ${pw} + + ${sudo} sed --in-place \ + 's|/sbin/getty|/sbin/getty -n -l /bin/sh|g' \ + ${rootfs}/etc/inittab + + ${sudo} sed --in-place \ + 's|#ttyS0|ttyS0|g' \ + ${rootfs}/etc/inittab + + egrep 'ttyS0' ${rootfs}/etc/inittab | sed 's|ttyS0|ttyAMA0|g' | sudo_append ${rootfs}/etc/inittab +} + +setup_sshd() { + local rootfs=${1} + local srv_key=${2} + + enter_chroot ${rootfs} " + set -e + mkdir -p /etc/dropbear/ + /usr/bin/dropbearkey -t rsa -f /etc/dropbear/dropbear_rsa_host_key + /usr/bin/dropbearkey -t dss -f /etc/dropbear/dropbear_dss_host_key + /usr/bin/dropbearkey -t ecdsa -f /etc/dropbear/dropbear_ecdsa_host_key + " + + #echo "${script_name}: USER=@$(id --user --real --name)@" >&2 + ${sudo} cp -f "${rootfs}/etc/dropbear/dropbear_rsa_host_key" ${srv_key} + ${sudo} chown $(id --user --real --name): ${srv_key} + + #echo 'DROPBEAR_OPTS=""' | sudo_write ${rootfs}/etc/conf.d/dropbear +} + +setup_relay_client() { + local 
rootfs=${1} + + local tdd_script="/usr/sbin/tdd-relay-client.sh" + local tdd_service="/etc/init.d/tdd-relay-client" + local tdd_log="/var/log/tdd-relay-client.log" + + write_tdd_client_script ${rootfs}${tdd_script} + + sudo_write "${rootfs}/${tdd_service}" <&2 + exit 1 + ;; + esac + + (${sudo} debootstrap --foreign --arch ${debian_arch} --no-check-gpg \ + ${debootstrap_extra} \ + ${debian_os_release} ${rootfs} ${debian_os_mirror}) + + debug_check "${FUNCNAME[0]}:${LINENO}" + + copy_qemu_static ${rootfs} + + ${sudo} mount -l -t proc + ${sudo} ls -la ${rootfs} + ${sudo} find ${rootfs} -type l -exec ls -la {} \; | egrep ' -> /' + ${sudo} rm -f ${rootfs}/proc + ${sudo} mkdir -p ${rootfs}/proc + ${sudo} mount -t proc -o nosuid,nodev,noexec /proc ${rootfs}/proc + ${sudo} mount -l -t proc + + ${sudo} LANG=C.UTF-8 chroot ${rootfs} /bin/sh -x <&2 + exit 1 + fi + + ${sudo} cp -f ${rootfs}/etc/ssh/ssh_host_rsa_key ${srv_key} + echo "${script_name}: USER=@$(id --user --real --name)@" >&2 + #printenv + #${sudo} chown $(id --user --real --name): ${srv_key} +} + +setup_relay_client() { + local rootfs=${1} + + local tdd_script="/bin/tdd-relay-client.sh" + local tdd_service="tdd-relay-client.service" + + write_tdd_client_script "${rootfs}${tdd_script}" + + sudo_write "${rootfs}/etc/systemd/system/${tdd_service}" <&2 + set +e + ${sudo} true + mount + ${sudo} ls -l /var/run/sudo/ts + set -e + echo "debug_check: (${info}) ^^^^" >&2 +} + +bootstrap_rootfs() { + local rootfs=${1} + + debug_check "${FUNCNAME[0]}:${LINENO}" + + case ${target_arch} in + amd64) + debian_arch="amd64" + debian_os_release=${debian_os_release:-"buster"} + debian_os_mirror=${debian_os_mirror:-"http://ftp.us.debian.org/debian"} + ;; + arm64) + debian_arch="arm64" + debian_os_release=${debian_os_release:-"buster"} + debian_os_mirror=${debian_os_mirror:-"http://ftp.us.debian.org/debian"} + ;; + ppc32|ppc64) + debian_arch="powerpc" + debian_os_release=${debian_os_release:-"unstable"} + debian_os_mirror=${debian_os_mirror:-"http://ftp.ports.debian.org/debian-ports"} + debootstrap_extra="--include=debian-ports-archive-keyring --exclude=powerpc-ibm-utils,powerpc-utils" + ;; + *) + echo "${script_name}: ERROR: Unsupported target-arch '${target_arch}'." 
>&2 + exit 1 + ;; + esac + + (${sudo} debootstrap --foreign --arch ${debian_arch} --no-check-gpg \ + ${debootstrap_extra} \ + ${debian_os_release} ${rootfs} ${debian_os_mirror}) + + debug_check "${FUNCNAME[0]}:${LINENO}" + + copy_qemu_static ${rootfs} + + ${sudo} mount -l -t proc + ${sudo} ls -la ${rootfs} + ${sudo} find ${rootfs} -type l -exec ls -la {} \; | egrep ' -> /' + ${sudo} rm -f ${rootfs}/proc + ${sudo} mkdir -p ${rootfs}/proc + ${sudo} mount -t proc -o nosuid,nodev,noexec /proc ${rootfs}/proc + ${sudo} mount -l -t proc + + ${sudo} LANG=C.UTF-8 chroot ${rootfs} /bin/sh -x <&2 + exit 1 + fi + + ${sudo} cp -f ${rootfs}/etc/ssh/ssh_host_rsa_key ${srv_key} + echo "${script_name}: USER=@$(id --user --real --name)@" >&2 + #printenv + #${sudo} chown $(id --user --real --name): ${srv_key} +} + +setup_relay_client() { + local rootfs=${1} + + local tdd_script="/bin/tdd-relay-client.sh" + local tdd_service="tdd-relay-client.service" + + write_tdd_client_script "${rootfs}${tdd_script}" + + sudo_write "${rootfs}/etc/systemd/system/${tdd_service}" <&2 + exit 1 +fi + +set -x + +rpm2cpio - | cpio --extract --unconditional --make-directories --preserve-modification-time diff --git a/scripts/run-builder.sh b/scripts/run-builder.sh new file mode 100755 index 00000000..a70db351 --- /dev/null +++ b/scripts/run-builder.sh @@ -0,0 +1,211 @@ +#!/usr/bin/env bash + +usage () { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "${script_name} - Runs a tdd container. If no command is provided, runs an interactive container." >&2 + echo "Usage: ${script_name} [flags] -- [command] [args]" >&2 + echo "Option flags:" >&2 + echo " -a --docker-args - Args for docker run. Default: '${docker_args}'" >&2 + echo " -h --help - Show this help and exit." >&2 + echo " -n --container-name - Container name. Default: '${container_name}'." >&2 + echo " -t --tag - Print Docker tag to stdout and exit." >&2 + echo " -r --as-root - Run as root user." >&2 + echo " -v --verbose - Verbose execution." >&2 + echo "Args:" >&2 + echo " command - Default: '${user_cmd}'" >&2 + echo "Environment:" >&2 + echo " DOCKER_TAG - Default: '${DOCKER_TAG}'" >&2 + echo " TDD_CHECKOUT_SERVER - Default: '${TDD_CHECKOUT_SERVER}'" >&2 + echo " TDD_CHECKOUT_PORT - Default: '${TDD_CHECKOUT_PORT}'" >&2 + echo " TDD_RELAY_SERVER - Default: '${TDD_RELAY_SERVER}'" >&2 + echo " TDD_RELAY_PORT - Default: '${TDD_RELAY_PORT}'" >&2 + echo " TDD_TFTP_SERVER - Default: '${TDD_TFTP_SERVER}'" >&2 + echo " TDD_TFTP_USER - Default: '${TDD_TFTP_USER}'" >&2 + echo " TDD_TFTP_ROOT - Default: '${TDD_TFTP_ROOT}'" >&2 + echo "Examples:" >&2 + echo " ${script_name} -v" >&2 + eval "${old_xtrace}" +} + +process_opts() { + local short_opts="a:hn:trv" + local long_opts="docker-args:,help,container-name:,tag,as-root,verbose" + + local opts + opts=$(getopt --options ${short_opts} --long ${long_opts} -n "${script_name}" -- "$@") + + if [ $? 
!= 0 ]; then + echo "${script_name}: ERROR: Internal getopt" >&2 + exit 1 + fi + + eval set -- "${opts}" + + while true ; do + case "${1}" in + -a | --docker-args) + docker_args="${2}" + shift 2 + ;; + -h | --help) + usage=1 + shift + ;; + -n | --container-name) + container_name="${2}" + shift 2 + ;; + -t | --tag) + tag=1 + shift + ;; + -r | --as-root) + as_root=1 + shift + ;; + -v | --verbose) + set -x + verbose=1 + shift + ;; + --) + shift + user_cmd="${@}" + break + ;; + *) + echo "${script_name}: ERROR: Internal opts: '${@}'" >&2 + exit 1 + ;; + esac + done +} + +on_exit() { + local result=${1} + + echo "${script_name}: ${result}" >&2 +} + +add_server() { + local server=${1} + local addr + + if ! is_ip_addr ${server}; then + find_addr addr "/etc/hosts" ${server} + docker_extra_args+=" --add-host ${server}:${addr}" + fi +} + +#=============================================================================== +export PS4='\[\e[0;33m\]+ ${BASH_SOURCE##*/}:${LINENO}:(${FUNCNAME[0]:-"?"}):\[\e[0m\] ' +script_name="${0##*/}" + +if [ ${TDD_BUILDER} ]; then + echo "${script_name}: ERROR: Already in tdd-builder." >&2 + exit 1 +fi + +SCRIPTS_TOP=${SCRIPTS_TOP:-"$( cd "${BASH_SOURCE%/*}" && pwd )"} +source ${SCRIPTS_TOP}/lib/util.sh + +trap "on_exit 'Done, failed.'" EXIT +set -e + +DOCKER_TOP=${DOCKER_TOP:-"$( cd "${SCRIPTS_TOP}/../docker" && pwd )"} +DOCKER_TAG=${DOCKER_TAG:-"$("${DOCKER_TOP}/builder/build-builder.sh" --tag)"} + +process_opts "${@}" + +docker_extra_args="" +container_name=${container_name:-"tdd-builder"} +user_cmd=${user_cmd:-"/bin/bash"} + +if [[ ${usage} ]]; then + usage + trap - EXIT + exit 0 +fi + +if [[ ${tag} ]]; then + echo "${DOCKER_TAG}" + exit 0 +fi + +if [[ ! ${TDD_CHECKOUT_SERVER} ]]; then + echo "${script_name}: ERROR: TDD_CHECKOUT_SERVER not defined." >&2 + usage + exit 1 +fi +if [[ ! ${TDD_RELAY_SERVER} ]]; then + echo "${script_name}: ERROR: TDD_RELAY_SERVER not defined." >&2 + usage + exit 1 +fi +if [[ ! ${TDD_TFTP_SERVER} ]]; then + echo "${script_name}: ERROR: TDD_TFTP_SERVER not defined." >&2 + usage + exit 1 +fi + +if [[ ! ${SSH_AUTH_SOCK} ]]; then + echo "${script_name}: ERROR: SSH_AUTH_SOCK not defined." >&2 +fi + +if ! echo "${docker_args}" | grep -q ' -w '; then + docker_extra_args+=" -v $(pwd):/work -w /work" +fi + +if [[ ! ${as_root} ]]; then + docker_extra_args+=" \ + -u $(id --user --real):$(id --group --real) \ + -v /etc/group:/etc/group:ro \ + -v /etc/passwd:/etc/passwd:ro \ + -v /etc/shadow:/etc/shadow:ro \ + -v /dev:/dev" +fi + +add_server ${TDD_CHECKOUT_SERVER} +add_server ${TDD_RELAY_SERVER} +add_server ${TDD_TFTP_SERVER} + +echo "${script_name}: ${TDD_TARGET_BMC_LIST} = '${TDD_TARGET_BMC_LIST}'." 
>&2 + +for s in ${TDD_TARGET_BMC_LIST}; do + add_server ${s} +done + +if egrep '127.0.0.53' /etc/resolv.conf; then + docker_extra_args+=" --dns 127.0.0.53" +fi + +eval "docker run \ + --rm \ + -it \ + --device /dev/kvm \ + --privileged \ + --network host \ + --name ${container_name} \ + --hostname ${container_name} \ + --add-host ${container_name}:127.0.0.1 \ + -v ${SSH_AUTH_SOCK}:/ssh-agent -e SSH_AUTH_SOCK=/ssh-agent \ + --group-add $(stat --format=%g /var/run/docker.sock) \ + --group-add $(stat --format=%g /dev/kvm) \ + --group-add sudo \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -e TDD_CHECKOUT_SERVER \ + -e TDD_CHECKOUT_PORT \ + -e TDD_RELAY_SERVER \ + -e TDD_RELAY_PORT \ + -e TDD_TFTP_SERVER \ + -e TDD_TFTP_USER \ + -e TDD_TFTP_ROOT \ + ${docker_extra_args} \ + ${docker_args} \ + ${DOCKER_TAG} \ + ${user_cmd}" + +trap - EXIT +on_exit 'Done, success.' diff --git a/scripts/run-fedora-qemu-tests.sh b/scripts/run-fedora-qemu-tests.sh new file mode 100755 index 00000000..0d9124e2 --- /dev/null +++ b/scripts/run-fedora-qemu-tests.sh @@ -0,0 +1,300 @@ +#!/usr/bin/env bash + +usage() { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "${script_name} - Run Fedora install test in QEMU." >&2 + echo "Usage: ${script_name} [flags]" >&2 + echo "Option flags:" >&2 + echo " -a --arch - Target architecture. Default: '${target_arch}'." >&2 + echo " -c --kernel-cmd - Kernel command line options. Default: '${kernel_cmd}'." >&2 + echo " -f --hostfwd-offset - QEMU hostfwd port offset. Default: '${hostfwd_offset}'." >&2 + echo " -h --help - Show this help and exit." >&2 + echo " -o --out-file - stdout, stderr redirection file. Default: '${out_file}'." >&2 + echo " -s --systemd-debug - Run systemd with debug options. Default: '${systemd_debug}'." >&2 + echo " -v --verbose - Verbose execution." >&2 + echo " --hda - QEMU IDE hard disk image hda. Default: '${hda}'." >&2 + echo " --initrd - Initrd image. Default: '${initrd}'." >&2 + echo " --kickstart - Fedora kickstart file. Default: '${kickstart}'." >&2 + echo " --kernel - Kernel image. Default: '${kernel}'." >&2 + echo " --result-file - Result file. Default: '${result_file}'." >&2 + echo " --ssh-key - SSH private key file. Default: '${ssh_key}'." >&2 + eval "${old_xtrace}" +} + +process_opts() { + local short_opts="a:c:f:ho:sv" + local long_opts="arch:,kernel-cmd:,hostfwd-offset:,help,out-file:,systemd-debug,\ +verbose,hda:,initrd:,kickstart:,kernel:,result-file:,ssh-key:" + + local opts + opts=$(getopt --options ${short_opts} --long ${long_opts} -n "${script_name}" -- "$@") + + if [ $? 
!= 0 ]; then + echo "${script_name}: ERROR: Internal getopt" >&2 + exit 1 + fi + + eval set -- "${opts}" + + while true ; do + case "${1}" in + -a | --arch) + target_arch=$(get_arch "${2}") + shift 2 + ;; + -c | --kernel-cmd) + kernel_cmd="${2}" + shift 2 + ;; + -f | --hostfwd-offset) + hostfwd_offset="${2}" + shift 2 + ;; + -h | --help) + usage=1 + shift + ;; + -o | --out-file) + out_file="${2}" + shift 2 + ;; + -s | --systemd-debug) + systemd_debug=1 + shift + ;; + -v | --verbose) + set -x + verbose=1 + shift + ;; + --hda) + hda="${2}" + shift 2 + ;; + --initrd) + initrd="${2}" + shift 2 + ;; + --kickstart) + kickstart="${2}" + shift 2 + ;; + --kernel) + kernel="${2}" + shift 2 + ;; + --result_file) + result_file="${2}" + shift 2 + ;; + --ssh-key) + ssh_key="${2}" + shift 2 + ;; + --) + shift + break + ;; + *) + echo "${script_name}: ERROR: Internal opts: '${@}'" >&2 + exit 1 + ;; + esac + done +} + +on_exit() { + local result=${1} + + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo '*** on_exit ***' + echo "*** result = ${result}" >&2 + echo "*** qemu_pid = ${qemu_pid}" >&2 + echo "*** up time = $(sec_to_min ${SECONDS}) min" >&2 + eval "${old_xtrace}" + + if [[ -n "${qemu_pid}" ]]; then + sudo kill ${qemu_pid} || : + wait ${qemu_pid} + qemu_pid='' + fi + + if [[ -d ${ks_mnt} ]]; then + sudo umount ${ks_mnt} || : + rm -rf ${ks_mnt} || : + ks_mnt='' + fi + + if [[ -f "${ks_img}" ]]; then + rm -f ${ks_img} + ks_img='' + fi + + if [[ -d ${tmp_dir} ]]; then + ${sudo} rm -rf ${tmp_dir} + fi + + echo "${script_name}: ${result}" >&2 +} + +make_kickstart_img() { + ks_img="$(mktemp --tmpdir tdd-ks-img.XXXX)" + ks_mnt="$(mktemp --tmpdir --directory tdd-ks-mnt.XXXX)" + + local ks_file + ks_file="${ks_mnt}/${kickstart##*/}" + + dd if=/dev/zero of=${ks_img} bs=1M count=1 + mkfs.vfat ${ks_img} + + sudo mount -o rw,uid=$(id -u),gid=$(id -g) ${ks_img} ${ks_mnt} + + cp -v ${kickstart} ${ks_file} + + if [[ -n "${ssh_key}" ]]; then + sed --in-place "s|@@ssh-keys@@|$(cat ${ssh_key}.pub)|" ${ks_file} + fi + + echo '' >> ${result_file} + echo '---------' >> ${result_file} + echo 'kickstart' >> ${result_file} + echo '---------' >> ${result_file} + cat ${ks_file} >> ${result_file} + echo '---------' >> ${result_file} + + sudo umount ${ks_mnt} + rmdir ${ks_mnt} + ks_mnt='' +} + +start_qemu_user_networking() { + ssh_fwd=$(( ${hostfwd_offset} + 22 )) + + echo "${script_name}: ssh_fwd port = ${ssh_fwd}" >&2 + + ${SCRIPTS_TOP}/start-qemu.sh \ + --arch="${target_arch}" \ + --kernel-cmd="${kernel_cmd}" \ + --hostfwd-offset="${hostfwd_offset}" \ + --initrd="${initrd}" \ + --kernel="${kernel}" \ + --hda="${hda}" \ + --hdb="${ks_img}" \ + --out-file="${out_file}" \ + --pid-file="${qemu_pid_file}" \ + --verbose \ + ${start_extra_args} \ + "${out_file}.start" & +} + +#=============================================================================== +# program start +#=============================================================================== +script_name="${0##*/}" +SCRIPTS_TOP=${SCRIPTS_TOP:-"$( cd "${BASH_SOURCE%/*}" && pwd )"} + +trap "on_exit 'failed.'" EXIT +set -e + +host_arch=$(get_arch "$(uname -m)") + +source ${SCRIPTS_TOP}/lib/util.sh +source ${SCRIPTS_TOP}/lib/relay.sh + +process_opts "${@}" + +target_arch=${target_arch:-"${host_arch}"} +hostfwd_offset=${hostfwd_offset:-"20000"} + +if [[ -n "${usage}" ]]; then + usage + exit 0 +fi + +if [[ "${target_arch}" != "arm64" ]]; then + echo "${script_name}: ERROR: Unsupported target arch '${target_arch}'." 
>&2 + exit 1 +fi + +check_opt 'kernel' ${kernel} +check_file "${kernel}" + +check_opt 'initrd' ${initrd} +check_file "${initrd}" + +check_opt 'hda' ${hda} +check_file "${hda}" + +check_opt 'kickstart' ${kickstart} +check_file "${kickstart}" + +inst_repo="$(egrep '^url[[:space:]]*--url=' ${kickstart} | cut -d '=' -f 2 | sed 's/"//g')" +kernel_cmd="inst.text inst.repo=${inst_repo} inst.ks=hd:vdb:${kickstart##*/} ${kernel_cmd}" + +if [[ ! ${out_file} ]]; then + out_file="${script_name}-out.txt" +fi + +if [[ ! ${result_file} ]]; then + result_file="${script_name}-result.txt" +fi + +if [[ ${ssh_key} ]]; then + check_file ${ssh_key} " ssh-key" "usage" +fi + +start_extra_args='' + +if [[ ${systemd_debug} ]]; then + start_extra_args+=' --systemd-debug' +fi + +rm -f ${out_file} ${out_file}.start ${result_file} + +tmp_dir="$(mktemp --tmpdir --directory ${script_name}.XXXX)" + +echo '--------' >> ${result_file} +echo 'printenv' >> ${result_file} +echo '--------' >> ${result_file} +printenv >> ${result_file} +echo '---------' >> ${result_file} + +make_kickstart_img + +qemu_pid_file=${tmp_dir}/qemu-pid + +SECONDS=0 +start_qemu_user_networking + +echo "${script_name}: Waiting for QEMU startup..." >&2 +sleep 10s + +echo '---- start-qemu start ----' >&2 +cat ${out_file}.start >&2 +echo '---- start-qemu end ----' >&2 + +ps aux + +if [[ ! -f ${qemu_pid_file} ]]; then + echo "${script_name}: ERROR: QEMU seems to have quit early (pid file)." >&2 + exit 1 +fi + +qemu_pid=$(cat ${qemu_pid_file}) + +if ! kill -0 ${qemu_pid} &> /dev/null; then + echo "${script_name}: ERROR: QEMU seems to have quit early (pid)." >&2 + exit 1 +fi + +echo "${script_name}: Waiting for QEMU exit..." >&2 +wait_pid ${qemu_pid} 180 + +echo "${script_name}: Boot time: $(sec_to_min ${SECONDS}) min" >&2 + +trap - EXIT +on_exit 'Done, success.' diff --git a/scripts/run-kernel-qemu-tests.sh b/scripts/run-kernel-qemu-tests.sh new file mode 100755 index 00000000..24b3e7aa --- /dev/null +++ b/scripts/run-kernel-qemu-tests.sh @@ -0,0 +1,394 @@ +#!/usr/bin/env bash + +set -e + +script_name="${0##*/}" + +SCRIPTS_TOP=${SCRIPTS_TOP:-"$( cd "${BASH_SOURCE%/*}" && pwd )"} + +source ${SCRIPTS_TOP}/lib/util.sh +source ${SCRIPTS_TOP}/lib/relay.sh + +usage() { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "${script_name} - Run Linux kernel tests in QEMU." >&2 + echo "Usage: ${script_name} [flags]" >&2 + echo "Option flags:" >&2 + echo " -a --arch - Target architecture. Default: '${target_arch}'." >&2 + echo " -c --kernel-cmd - Kernel command line options. Default: '${kernel_cmd}'." >&2 + echo " -f --hostfwd-offset - QEMU hostfwd port offset. Default: '${hostfwd_offset}'." >&2 + echo " -h --help - Show this help and exit." >&2 + echo " -i --initrd - Initrd image. Default: '${initrd}'." >&2 + echo " -k --kernel - Kernel image. Default: '${kernel}'." >&2 + echo " -o --out-file - stdout, stderr redirection file. Default: '${out_file}'." >&2 + echo " -s --systemd-debug - Run systemd with debug options. Default: '${systemd_debug}'." >&2 + echo " -v --verbose - Verbose execution." >&2 + echo " --relay-server - Relay server host[:port]. Default: '${relay_server}'." >&2 + echo " --result-file - Result file. Default: '${result_file}'." >&2 + echo " --ssh-login-key - SSH login private key file. Default: '${ssh_login_key}'." >&2 + echo " --test-name - Tests name. Default: '${test_name}'." >&2 + echo " --tests-dir - Test directory. Default: '${tests_dir}'." >&2 + #echo " --qemu-tap - EXPERIMENTAL -- Use QEMU tap networking. 
Default: '${qemu_tap}'." >&2 + eval "${old_xtrace}" +} + +process_opts() { + local short_opts="a:c:d:f:hi:k:o:r:sv" + local long_opts="arch:,kernel-cmd:,ether-mac:,hostfwd-offset:,help,initrd:,\ +kernel:,out-file:,systemd-debug,verbose,\ +relay-server:,result-file:,ssh-login-key:,test-name:,tests-dir:,qemu-tap" + + local opts + opts=$(getopt --options ${short_opts} --long ${long_opts} -n "${script_name}" -- "$@") + + eval set -- "${opts}" + + while true ; do + case "${1}" in + -a | --arch) + target_arch=$(get_arch "${2}") + shift 2 + ;; + -c | --kernel-cmd) + kernel_cmd="${2}" + shift 2 + ;; + -f | --hostfwd-offset) + hostfwd_offset="${2}" + shift 2 + ;; + -h | --help) + usage=1 + shift + ;; + -i | --initrd) + initrd="${2}" + shift 2 + ;; + -k | --kernel) + kernel="${2}" + shift 2 + ;; + -o | --out-file) + out_file="${2}" + shift 2 + ;; + -s | --systemd-debug) + systemd_debug=1 + shift + ;; + -v | --verbose) + set -x + verbose=1 + shift + ;; + --relay-server) + relay_server="${2}" + shift 2 + ;; + --result-file) + result_file="${2}" + shift 2 + ;; + --ssh-login-key) + ssh_login_key="${2}" + shift 2 + ;; + --test-name) + test_name="${2}" + shift 2 + ;; + --tests-dir) + tests_dir="${2}" + shift 2 + ;; + --qemu-tap) + qemu_tap=1 + shift + ;; + --) + shift + break + ;; + *) + echo "${script_name}: ERROR: Internal opts: '${@}'" >&2 + exit 1 + ;; + esac + done +} + +on_exit() { + local result=${1} + + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo '*** on_exit ***' + echo "*** result = ${result}" >&2 + echo "*** qemu_pid = ${qemu_pid}" >&2 + echo "*** up time = $(sec_to_min ${SECONDS}) min" >&2 + eval "${old_xtrace}" + + if [[ -n "${qemu_pid}" ]]; then + ${sudo} kill ${qemu_pid} || : + wait ${qemu_pid} + qemu_pid='' + fi + + rm -f ${test_kernel} + + if [[ -d ${tmp_dir} ]]; then + ${sudo} rm -rf ${tmp_dir} + fi + + echo "${script_name}: ${result}" >&2 +} + +start_qemu_user_networking() { + ssh_fwd=$(( ${hostfwd_offset} + 22 )) + + echo "${script_name}: ssh_fwd port = ${ssh_fwd}" >&2 + + ${SCRIPTS_TOP}/start-qemu.sh \ + --arch="${target_arch}" \ + --kernel-cmd="${kernel_cmd}" \ + --hostfwd-offset="${hostfwd_offset}" \ + --initrd="${initrd}" \ + --kernel="${test_kernel}" \ + --out-file="${out_file}" \ + --pid-file="${qemu_pid_file}" \ + --verbose \ + ${start_qemu_extra_args} \ + "${out_file}.start" & + ps aux +} + +start_qemu_tap_networking() { + local mac=${2} + + local bridge="br0" + local host_eth="eth0" + local qemu_tap="qemu0" + local my_addr=$(my_addr) + local my_net="${my_addr%.[0-9]*}" + + echo "${script_name}: my_addr = '${my_addr}'" >&2 + echo "${script_name}: my_net = '${my_net}'" >&2 + + local bridge_addr + bridge_addr="$(ip address show dev ${host_eth} \ + | egrep -o 'inet .*' | cut -f 2 -d ' ')" + + echo "${script_name}: bridge_addr='${bridge_addr}'" >&2 + + # Create bridge. + ${sudo} ip link add ${bridge} type bridge + ${sudo} ip link set ${bridge} down + ${sudo} ip addr flush dev ${bridge} + ${sudo} ip addr add dev ${bridge} ${bridge_addr} + ${sudo} ip link set ${bridge} up + bridge link + + # Add host interface to bridge. + ${sudo} ip link set ${host_eth} down + ${sudo} ip addr flush dev ${host_eth} + ${sudo} ip link set ${host_eth} up + ${sudo} ip link set ${host_eth} master ${bridge} + bridge link + + sudo_write /etc/default/isc-dhcp-server <"${out_file}" & + + qemu_pid="${!}" + + # Add qemu tap interface to bridge. 
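+	# (Tap networking is experimental -- see the commented --qemu-tap option
+	# in usage; the ${qemu_tap} device is assumed to exist by this point.)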
+ ${sudo} ip link set ${qemu_tap} down + ${sudo} ip addr flush dev ${qemu_tap} + ${sudo} ip link set ${qemu_tap} up + ${sudo} ip link set ${qemu_tap} master ${bridge} + bridge link +} + +#=============================================================================== +# program start +#=============================================================================== +sudo="sudo -S" + +trap "on_exit 'failed.'" EXIT + +process_opts "${@}" + +test_machine='qemu' + +host_arch=$(get_arch "$(uname -m)") +target_arch=${target_arch:-"${host_arch}"} +hostfwd_offset=${hostfwd_offset:-"20000"} +out_file=${out_file:-"${test_machine}.out"} +result_file=${result_file:-"${test_machine}-result.txt"} + +qemu_startup_timeout=${qemu_startup_timeout:-10} +qemu_exit_timeout=${qemu_exit_timeout:-240} +relay_get_timeout=${relay_get_timeout:-240} + +relay_triple=$(relay_init_triple ${relay_server}) +relay_token=$(relay_triple_to_token ${relay_triple}) + +if [[ -n "${usage}" ]]; then + usage + exit 0 +fi + +case ${target_arch} in +arm64|ppc32|ppc64) + ;; +*) + echo "${script_name}: ERROR: Unsupported target arch '${target_arch}'." >&2 + exit 1 + ;; +esac + + +check_opt 'kernel' ${kernel} +check_file "${kernel}" + +check_opt 'initrd' ${initrd} +check_file "${initrd}" + +check_opt 'ssh-login-key' ${ssh_login_key} +check_file "${ssh_login_key}" + +check_opt 'test-name' ${test_name} + +check_opt 'tests-dir' ${tests_dir} +check_directory "${tests_dir}" + +if [[ ${systemd_debug} ]]; then + start_qemu_extra_args+=" --systemd-debug" +fi + +${SCRIPTS_TOP}/set-relay-triple.sh \ + --kernel=${kernel} \ + --relay-triple="${relay_triple}" \ + --verbose + +test_kernel=${kernel}.${relay_token} + +mkdir -p ${out_file%/*} +rm -f ${out_file} ${out_file}.start ${result_file} + +tmp_dir="$(mktemp --tmpdir --directory ${script_name}.XXXX)" + +echo '--------' >> ${result_file} +echo 'printenv' >> ${result_file} +echo '--------' >> ${result_file} +printenv >> ${result_file} +echo '--------' >> ${result_file} + +qemu_hda=${tmp_dir}/qemu-hda +qemu-img create -f qcow2 ${qemu_hda} 8G +start_qemu_extra_args+=" --hda=${qemu_hda}" + +qemu_hdb=${tmp_dir}/qemu-hdb +qemu-img create -f qcow2 ${qemu_hdb} 8G +start_qemu_extra_args+=" --hdb=${qemu_hdb}" + +qemu_hdc=${tmp_dir}/qemu-hdc +qemu-img create -f qcow2 ${qemu_hdc} 8G +start_qemu_extra_args+=" --hdc=${qemu_hdc}" + +qemu_pid_file=${tmp_dir}/qemu-pid + +SECONDS=0 +start_qemu_user_networking + +#remote_mac="10:11:12:00:00:01" +#start_qemu_tap_networking ${remote_mac} + +echo "${script_name}: Waiting for QEMU startup..." >&2 +sleep ${qemu_startup_timeout} + +echo '---- start-qemu start ----' >&2 +cat ${out_file}.start >&2 +echo '---- start-qemu end ----' >&2 + +ps aux + +if [[ ! -f ${qemu_pid_file} ]]; then + echo "${script_name}: ERROR: QEMU seems to have quit early (pid file)." >&2 + exit 1 +fi + +qemu_pid=$(cat ${qemu_pid_file}) + +if ! kill -0 ${qemu_pid} &> /dev/null; then + echo "${script_name}: ERROR: QEMU seems to have quit early (pid)." >&2 + exit 1 +fi + +relay_get ${relay_get_timeout} ${relay_triple} remote_addr + +user_remote_host="root@localhost" +user_remote_ssh_opts="-o Port=${ssh_fwd}" + +tap_remote_host="root@${remote_addr}" + +remote_host=${user_remote_host} +remote_ssh_opts=${user_remote_ssh_opts} + +# The remote host address could come from DHCP, so don't use known_hosts. 
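+# Using StrictHostKeyChecking=no together with UserKnownHostsFile=/dev/null
+# also avoids host key mismatch failures when a recycled address presents a
+# new host key.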
+ssh_no_check="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" + +if [[ -f ${SCRIPTS_TOP}/test-plugin/${test_name}/${test_name}.sh ]]; then + source ${SCRIPTS_TOP}/test-plugin/${test_name}/${test_name}.sh +else + echo "${script_name}: ERROR: Test plugin '${test_name}.sh' not found." >&2 + exit 1 +fi + +run_ssh_opts="${ssh_no_check} -i ${ssh_login_key} ${remote_ssh_opts}" +test_run_${test_name/-/_} ${tests_dir} ${test_machine} ${remote_host} run_ssh_opts + +ssh ${ssh_no_check} -i ${ssh_login_key} ${remote_ssh_opts} ${remote_host} \ + '/sbin/poweroff &' + +echo "${script_name}: Waiting for QEMU exit..." >&2 +wait_pid ${qemu_pid} ${qemu_exit_timeout} + +trap - EXIT +on_exit 'Done, success.' diff --git a/scripts/run-kernel-remote-tests.sh b/scripts/run-kernel-remote-tests.sh new file mode 100755 index 00000000..3e9bbeea --- /dev/null +++ b/scripts/run-kernel-remote-tests.sh @@ -0,0 +1,342 @@ +#!/usr/bin/env bash + +set -e + +script_name="${0##*/}" + +SCRIPTS_TOP=${SCRIPTS_TOP:-"$( cd "${BASH_SOURCE%/*}" && pwd )"} + +source ${SCRIPTS_TOP}/lib/util.sh +source ${SCRIPTS_TOP}/lib/ipmi.sh +source ${SCRIPTS_TOP}/lib/relay.sh + +usage () { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "${script_name} - Run Linux kernel tests on remote machine." >&2 + echo "Usage: ${script_name} [flags]" >&2 + echo "Option flags:" >&2 + echo " -h --help - Show this help and exit." >&2 + echo " -i --initrd - Initrd image. Default: '${initrd}'." >&2 + echo " -k --kernel - Kernel image. Default: '${kernel}'." >&2 + echo " -m --test-machine - Test machine name. Default: '${test_machine}'." >&2 + echo " -n --no-known-hosts - Do not setup known_hosts file. Default: '${no_known_hosts}'." >&2 + echo " -o --out-file - stdout, stderr redirection file. Default: '${out_file}'." >&2 + echo " -s --systemd-debug - Run systemd with debug options. Default: '${systemd_debug}'." >&2 + echo " -v --verbose - Verbose execution." >&2 + echo " --bmc-host - Test machine BMC hostname or address. Default: '${bmc_host}'." >&2 + echo " --relay-server - Relay server host[:port]. Default: '${relay_server}'." >&2 + echo " --result-file - Result file. Default: '${result_file}'." >&2 + echo " --ssh-login-key - SSH login private key file. Default: '${ssh_login_key}'." >&2 + echo " --test-name - Tests name. Default: '${test_name}'." >&2 + echo " --tests-dir - Test directory. Default: '${tests_dir}'." >&2 + echo " --tftp-triple - tftp triple. File name or 'user:server:root'. Default: '${tftp_triple}'." 
>&2 + eval "${old_xtrace}" +} + +process_opts() { + local short_opts="hi:k:m:no:sv" + local long_opts="help,initrd:,kernel:,test-machine:,no-known-hosts,\ +out-file:,systemd-debug,verbose,\ +bmc-host:,relay-server:,result-file:,ssh-login-key:,test-name:,tests-dir:,\ +tftp-triple:" + + opts=$(getopt --options ${short_opts} --long ${long_opts} -n "${script_name}" -- "$@") + + eval set -- "${opts}" + + while true ; do + case "${1}" in + -h | --help) + usage=1 + shift + ;; + -i | --initrd) + initrd="${2}" + shift 2 + ;; + -k | --kernel) + kernel="${2}" + shift 2 + ;; + -m | --test-machine) + test_machine="${2}" + shift 2 + ;; + -n | --no-known-hosts) + no_known_hosts=1 + shift + ;; + -o | --out-file) + out_file="${2}" + shift 2 + ;; + -s | --systemd-debug) + systemd_debug=1 + shift + ;; + -v | --verbose) + set -x + verbose=1 + shift + ;; + --bmc-host) + bmc_host="${2}" + shift 2 + ;; + --relay-server) + relay_server="${2}" + shift 2 + ;; + --result-file) + result_file="${2}" + shift 2 + ;; + --ssh-login-key) + ssh_login_key="${2}" + shift 2 + ;; + --test-name) + test_name="${2}" + shift 2 + ;; + --tests-dir) + tests_dir="${2}" + shift 2 + ;; + --tftp-triple) + tftp_triple="${2}" + shift 2 + ;; + --) + shift + break + ;; + *) + echo "${script_name}: ERROR: Internal opts: '${@}'" >&2 + exit 1 + ;; + esac + done +} + +on_exit() { + local result=${1} + local sol_pid + + set +e + + if [[ -n "${sol_pid_file}" ]]; then + sol_pid=$(cat ${sol_pid_file}) + rm -f ${sol_pid_file} + fi + + if [[ -f ${test_kernel} ]]; then + rm -f ${test_kernel} + fi + + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo '*** on_exit ***' + echo "*** result = @${result}@" >&2 + echo "*** sol_pid_fil = @${sol_pid_file}@" >&2 + echo "*** sol_pid = @${sol_pid}@" >&2 + echo "*** ipmi_args = @${ipmi_args}@" >&2 + eval "${old_xtrace}" + + if [[ ${sol_pid} ]]; then + kill -0 ${sol_pid} + ${sudo} kill ${sol_pid} || : + fi + + if [[ ${ipmi_args} ]]; then + ipmitool ${ipmi_args} -I lanplus sol deactivate || : + ipmitool ${ipmi_args} -I lanplus chassis power off || : + fi + + if [[ ${sol_pid} ]]; then + wait ${sol_pid} + fi + + if [[ ${checkout_token} ]]; then + ${SCRIPTS_TOP}/checkin.sh ${checkout_token} + fi + + echo "${script_name}: ${result}" >&2 +} + +#=============================================================================== +# program start +#=============================================================================== +sudo="sudo -S" + +trap "on_exit 'failed.'" EXIT + +process_opts "${@}" + +host_arch=$(get_arch "$(uname -m)") +start_extra_args="" +out_file=${out_file:-"${test_machine}.out"} +result_file=${result_file:-"${test_machine}-result.txt"} + +relay_triple=$(relay_init_triple ${relay_server}) +relay_token=$(relay_triple_to_token ${relay_triple}) + +if [[ ! ${bmc_host} ]]; then + bmc_host="${test_machine}-bmc" + echo "${script_name}: INFO: BMC host: '${bmc_host}'" >&2 +fi + +if [[ ${usage} ]]; then + usage + exit 0 +fi + +check_opt 'test-machine' ${test_machine} + +check_opt 'kernel' ${kernel} +check_file "${kernel}" + +check_opt 'initrd' ${initrd} +check_file "${initrd}" + +check_opt 'ssh-login-key' ${ssh_login_key} +check_file "${ssh_login_key}" + +check_opt 'test-name' ${test_name} + +check_opt 'tests-dir' ${tests_dir} +check_directory "${tests_dir}" + +tmp_kernel=${kernel}.tmp +test_kernel=${kernel}.${relay_token} + +if [[ ! 
${systemd_debug} ]]; then + tmp_kernel=${kernel} +else + tmp_kernel=${kernel}.tmp + + ${SCRIPTS_TOP}/set-systemd-debug.sh \ + --in-file=${kernel} \ + --out-file=${tmp_kernel} \ + --verbose +fi + +${SCRIPTS_TOP}/set-relay-triple.sh \ + --relay-triple="${relay_triple}" \ + --kernel=${tmp_kernel} \ + --out-file=${test_kernel} \ + --verbose + +if [[ "${tmp_kernel}" != ${kernel} ]]; then + rm -f ${tmp_kernel} +fi + +if [[ "${test_machine}" == "qemu" ]]; then + echo "${script_name}: ERROR: '--test-machine=qemu' not yet supported." >&2 + exit 1 +fi + +set +e +checkout_token=$(${SCRIPTS_TOP}/checkout.sh -v ${test_machine} 1200) # 20 min. +result=${?} +set -e + +if [[ ${result} -ne 0 ]]; then + unset checkout_token + echo "${script_name}: ERROR: checkout '${test_machine}' failed (${result})." >&2 + exit 1 +fi + +if [[ ${no_known_hosts} ]]; then + tftp_upload_extra="--no-known-hosts" +fi + +${SCRIPTS_TOP}/tftp-upload.sh --kernel=${test_kernel} --initrd=${initrd} \ + --ssh-login-key=${ssh_login_key} --tftp-triple=${tftp_triple} \ + --tftp-dest="${test_machine}" ${tftp_upload_extra} --verbose + +# ===== secrets section ======================================================== +old_xtrace="$(shopt -po xtrace || :)" +set +o xtrace +if [[ ! ${TDD_BMC_CREDS_USR} || ! ${TDD_BMC_CREDS_PSW} ]]; then + echo "${script_name}: Using creds file ${test_machine}-bmc-creds" >&2 + check_file "${test_machine}-bmc-creds" ': Need environment variables or credentials file [user:passwd]' + TDD_BMC_CREDS_USR="$(cat ${test_machine}-bmc-creds | cut -d ':' -f 1)" + TDD_BMC_CREDS_PSW="$(cat ${test_machine}-bmc-creds | cut -d ':' -f 2)" +fi +if [[ ! ${TDD_BMC_CREDS_USR} ]]; then + echo "${script_name}: ERROR: No TDD_BMC_CREDS_USR defined." >&2 + exit 1 +fi +if [[ ! ${TDD_BMC_CREDS_PSW} ]]; then + echo "${script_name}: ERROR: No TDD_BMC_CREDS_PSW defined." >&2 + exit 1 +fi +export IPMITOOL_PASSWORD="${TDD_BMC_CREDS_PSW}" +eval "${old_xtrace}" +# ============================================================================== + +ping -c 1 -n ${bmc_host} +ipmi_args="-H ${bmc_host} -U ${TDD_BMC_CREDS_USR} -E" + +mkdir -p ${out_file%/*} +ipmitool ${ipmi_args} chassis status > ${out_file} +echo '-----' >> ${out_file} + +ipmitool ${ipmi_args} -I lanplus sol deactivate && result=1 + +if [[ ${result} ]]; then + # wait for ipmitool to disconnect. + sleep 5s +fi + +sol_pid_file="$(mktemp --tmpdir tdd-sol-pid.XXXX)" + +(echo "${BASHPID}" > ${sol_pid_file}; exec sleep 24h) | ipmitool ${ipmi_args} -I lanplus sol activate &>>"${out_file}" & + +sol_pid=$(cat ${sol_pid_file}) +echo "sol_pid=${sol_pid}" >&2 + +sleep 5s +if ! kill -0 ${sol_pid} &> /dev/null; then + echo "${script_name}: ERROR: ipmitool sol seems to have quit early." >&2 + exit 1 +fi + +ipmi_power_off "${ipmi_args}" +sleep 5s +ipmi_power_on "${ipmi_args}" + +relay_get "420" "${relay_triple}" remote_addr + +echo "${script_name}: remote_addr = '${remote_addr}'" >&2 + +remote_host="root@${remote_addr}" +remote_ssh_opts='' + +# The remote host address could come from DHCP, so don't use known_hosts. +ssh_no_check="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" + +if [[ -f ${SCRIPTS_TOP}/test-plugin/${test_name}/${test_name}.sh ]]; then + source ${SCRIPTS_TOP}/test-plugin/${test_name}/${test_name}.sh +else + echo "${script_name}: ERROR: Test plugin '${test_name}.sh' not found." 
>&2 + exit 1 +fi + +run_ssh_opts="${ssh_no_check} -i ${ssh_login_key} ${remote_ssh_opts}" +test_run_${test_name/-/_} ${tests_dir} ${test_machine} ${remote_host} run_ssh_opts + +ssh ${ssh_no_check} -i ${ssh_login_key} ${remote_ssh_opts} ${remote_host} \ + '/sbin/poweroff &' + +echo "${script_name}: Waiting for shutdown at ${remote_addr}..." >&2 + +ipmi_wait_power_state "${ipmi_args}" 'off' 120 + +trap - EXIT + +on_exit 'Done, success.' ${sol_pid_file} "${ipmi_args}" diff --git a/scripts/run-shellcheck.sh b/scripts/run-shellcheck.sh new file mode 100755 index 00000000..54078f9d --- /dev/null +++ b/scripts/run-shellcheck.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +SCRIPTS_TOP=${SCRIPTS_TOP:-"$( cd "${BASH_SOURCE%/*}" && pwd )"} + +files=$(find ${SCRIPTS_TOP} -name '*.sh' -type f) + +set +e +for f in ${files}; do + echo "=== ${f} ====================================" >&2 + shellcheck ${f} +done diff --git a/scripts/set-config-opts.sh b/scripts/set-config-opts.sh new file mode 100755 index 00000000..f42f46a1 --- /dev/null +++ b/scripts/set-config-opts.sh @@ -0,0 +1,137 @@ +#!/usr/bin/env bash + +usage() { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "${script_name} - Sets kernel config options from ." >&2 + echo "Usage: ${script_name} [flags] " >&2 + echo "Option flags:" >&2 + echo " -h --help - Show this help and exit." >&2 + echo " -v --verbose - Verbose execution." >&2 + echo " --platform-args - Platform args. Default: '${platform_args}'." >&2 + echo "Args:" >&2 + echo " - Build target {${target_list}}." >&2 + echo " Default: '${target}'." >&2 + echo " - Kernel source directory." >&2 + echo " Default: '${kernel_src}'." >&2 + echo "Spec File Info:" >&2 + echo " The spec file contains one kernel option per line. Lines beginning with '#' (regex '^#') are comments." >&2 + eval "${old_xtrace}" +} + +process_opts() { + local short_opts="hv" + local long_opts="help,verbose,platform-args:" + + local opts + opts=$(getopt --options ${short_opts} --long ${long_opts} -n "${script_name}" -- "$@") + + if [ $? != 0 ]; then + echo "${script_name}: ERROR: Internal getopt" >&2 + exit 1 + fi + + eval set -- "${opts}" + + while true ; do + case "${1}" in + -h | --help) + usage=1 + shift + ;; + -v | --verbose) + set -x + verbose=1 + shift + ;; + --platform-args) + platform_args="${2}" + shift 2 + ;; + --) + spec_file="${2}" + kernel_config="${3}" + if ! 
shift 3; then + echo "${script_name}: ERROR: Missing args:" >&2 + echo "${script_name}: ='${spec_file}'" >&2 + echo "${script_name}: ='${kernel_config}'" >&2 + usage + exit 1 + fi + if [[ -n "${1}" ]]; then + echo "${script_name}: ERROR: Got extra args: '${@}'" >&2 + usage + exit 1 + fi + break + ;; + *) + echo "${script_name}: ERROR: Internal opts: '${@}'" >&2 + exit 1 + ;; + esac + done +} + +on_exit() { + local result=${1} + + echo "${script_name}: ${result}" >&2 +} + +#=============================================================================== +# program start +#=============================================================================== +script_name="${0##*/}" + +SCRIPTS_TOP=${SCRIPTS_TOP:-"$( cd "${BASH_SOURCE%/*}" && pwd )"} + +source ${SCRIPTS_TOP}/lib/util.sh + +trap "on_exit 'Done, failed.'" EXIT +set -e + +process_opts "${@}" + +check_file "${spec_file}" "" "usage" +check_file "${kernel_config}" "" "usage" + +if [[ ${usage} ]]; then + usage + exit 0 +fi + +cp -f "${kernel_config}" "${kernel_config}".orig + +while read -r update; do + if [[ -z "${update}" || "${update:0:1}" == '#' ]]; then + #echo "skip @${update}@" + continue + fi + + tok="${update%%=*}" + + if old=$(egrep ".*${tok}[^_].*" ${kernel_config}); then + sed --in-place "{s@.*${tok}[^_].*@${update}@g}" ${kernel_config} + new=$(egrep ".*${tok}[^_].*" ${kernel_config}) + echo "${script_name}: Update: '${old}' -> '${new}'" + else + echo "${update}" >> "${kernel_config}" + echo "${script_name}: Append: '${update}'" + fi + +done < "${spec_file}" + +if [[ ${platform_args} ]]; then + sed --in-place "{s@platform_args@${platform_args}@g}" ${kernel_config} +fi + +diff -u "${kernel_config}".orig "${kernel_config}" || : >&2 + +echo "" >&2 + +trap - EXIT + +on_exit 'Done, success.' + diff --git a/scripts/set-known-good-cache.sh b/scripts/set-known-good-cache.sh new file mode 100755 index 00000000..e7aa63b7 --- /dev/null +++ b/scripts/set-known-good-cache.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +if [[ ! -f /.dockerenv || ! ${TDD_JENKINS} ]]; then + echo "ERROR: Must be run from inside tdd-jenkins container." + exit -1 +fi + +set -ex + +SRC=${SRC:-'/var/tdd-store'} +DEST=${DEST:-'/var/jenkins_home/workspace/tdd/kernel/kernel-test-cache/bootstrap'} + +mkdir -p ${DEST} + +if [[ -d ${SRC}/bootstrap.known-good ]]; then + cp -avf ${SRC}/bootstrap.known-good/* ${DEST}/ + exit 0 +fi + +if [[ -f ${SRC}/bootstrap.known-good.tar ]]; then + tar -C ${DEST}/ --strip-components=1 -xvf ${SRC}/bootstrap.known-good.tar + exit 0 +fi + +if [[ -f ${SRC}/bootstrap.known-good.tar.gz ]]; then + tar -C ${DEST}/ --strip-components=1 -xvf ${SRC}/bootstrap.known-good.tar.gz + exit 0 +fi + +ls -lh ${SRC} + +echo "WARNING: No bootstrap.known-good sources found." +exit 1 diff --git a/scripts/set-relay-triple.sh b/scripts/set-relay-triple.sh new file mode 100755 index 00000000..1090ad8f --- /dev/null +++ b/scripts/set-relay-triple.sh @@ -0,0 +1,148 @@ +#!/usr/bin/env bash + +set -e + +script_name="${0##*/}" + +SCRIPTS_TOP=${SCRIPTS_TOP:-"$( cd "${BASH_SOURCE%/*}" && pwd )"} + +source ${SCRIPTS_TOP}/lib/util.sh +source ${SCRIPTS_TOP}/lib/relay.sh + +usage () { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "${script_name} - Write a tdd-relay triple to a kernel image." >&2 + echo "Usage: ${script_name} [flags]" >&2 + echo "Option flags:" >&2 + echo " -h --help - Show this help and exit." >&2 + echo " -k --kernel - Kernel image. Default: '${kernel}'." >&2 + echo " -o --out-file - Output file. Default: '${out_file}'." 
>&2 + echo " -t --relay-triple - tdd-relay triple. File name or 'server:port:token'. Default: '${relay_triple}'." >&2 + echo " -v --verbose - Verbose execution." >&2 + eval "${old_xtrace}" +} + +short_opts="hk:o:t:v" +long_opts="help,kernel:,out-file:,relay-triple:,verbose" + +opts=$(getopt --options ${short_opts} --long ${long_opts} -n "${script_name}" -- "$@") + +if [ $? != 0 ]; then + echo "${script_name}: ERROR: Internal getopt" >&2 + exit 1 +fi + +eval set -- "${opts}" + +while true ; do + case "${1}" in + -h | --help) + usage=1 + shift + ;; + -k | --kernel) + kernel="${2}" + shift 2 + ;; + -o | --out-file) + out_file="${2}" + shift 2 + ;; + -t | --relay-triple) + relay_triple="${2}" + shift 2 + ;; + -v | --verbose) + set -x + verbose=1 + shift + ;; + --) + shift + break + ;; + *) + echo "${script_name}: ERROR: Internal opts: '${@}'" >&2 + exit 1 + ;; + esac +done + +if [[ -f "${relay_triple}" ]]; then + relay_triple=$(cat ${relay_triple}) + echo "${script_name}: INFO: Relay triple: '${relay_triple}'" >&2 +fi + +if [[ ! ${relay_triple} ]]; then + echo "${script_name}: ERROR: Must provide --relay_triple option." >&2 + usage + exit 1 +fi + +relay_triple=$(relay_resolve_triple ${relay_triple}) + +token=$(relay_triple_to_token ${relay_triple}) +out_file="${out_file:-${kernel}.${token}}" + +if [[ ${usage} ]]; then + usage + exit 0 +fi + +if [[ ! ${kernel} ]]; then + echo "${script_name}: ERROR: Must provide --kernel option." >&2 + usage + exit 1 +fi + +check_file "${kernel}" + +on_exit() { + local result=${1} + + echo "${script_name}: ${result}" >&2 +} + +trap "on_exit 'Done, failed.'" EXIT + +LANG=C +LC_ALL=C + +# kernel_param must match the CONFIG_CMDLINE entry in the kernel config fixup.spec file. +kernel_param='tdd_relay_triple=x*z' +set +e +old=$(eval "egrep --text --only-matching --max-count=1 '${kernel_param}' ${kernel}") +result=${?} + +if [[ ${result} -ne 0 ]]; then + echo "${script_name}: ERROR: Kernel tdd_relay_triple command line param '${kernel_param}' not found." >&2 + echo "Kernel strings:" >&2 + egrep --text 'tdd_relay_triple' ${kernel} >&2 + egrep --text --max-count=1 'chosen.*bootargs' ${kernel} >&2 + exit 1 +fi +set -e + +old_len=${#old} + +new="tdd_relay_triple=${relay_triple}" +new_len=${#new} + +empty=" " + +pad=$(printf '%0.*s' $(( ${old_len} - ${new_len} )) "${empty}") +pad_len=${#pad} + +cat ${kernel} | sed "{s/${old}/${new}${pad}/g}" > ${out_file} + +if [[ ${verbose} ]]; then + egrep --text 'tdd_relay_triple' ${out_file} >&2 +fi + +trap - EXIT + +echo "${script_name}: INFO: Test kernel: '${out_file}'" >&2 + +on_exit 'Done, success.' diff --git a/scripts/set-systemd-debug.sh b/scripts/set-systemd-debug.sh new file mode 100755 index 00000000..6033670b --- /dev/null +++ b/scripts/set-systemd-debug.sh @@ -0,0 +1,153 @@ +#!/usr/bin/env bash + +set -e + +script_name="${0##*/}" + +SCRIPTS_TOP=${SCRIPTS_TOP:-"$( cd "${BASH_SOURCE%/*}" && pwd )"} + +source ${SCRIPTS_TOP}/lib/util.sh +source ${SCRIPTS_TOP}/lib/relay.sh + +usage () { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "${script_name} - Write systemd debug args to a kernel image." >&2 + echo "Usage: ${script_name} [flags]" >&2 + echo "Option flags:" >&2 + echo " -h --help - Show this help and exit." >&2 + echo " -i --in-file - Kernel image. Default: '${in_file}'." >&2 + echo " -o --out-file - Output file. Default: '${out_file}'." >&2 + echo " -v --verbose - Verbose execution." 
>&2 + eval "${old_xtrace}" +} + +short_opts="hi:o:v" +long_opts="help,in-file:,out-file:,verbose" + +opts=$(getopt --options ${short_opts} --long ${long_opts} -n "${script_name}" -- "$@") + +if [ $? != 0 ]; then + echo "${script_name}: ERROR: Internal getopt" >&2 + exit 1 +fi + +eval set -- "${opts}" + +while true ; do + case "${1}" in + -h | --help) + usage=1 + shift + ;; + -i | --in-file) + in_file="${2}" + shift 2 + ;; + -o | --out-file) + out_file="${2}" + shift 2 + ;; + -v | --verbose) + set -x + verbose=1 + shift + ;; + --) + shift + break + ;; + *) + echo "${script_name}: ERROR: Internal opts: '${@}'" >&2 + exit 1 + ;; + esac +done + +out_file=${out_file:-"${in_file}.out"} + +if [[ ${usage} ]]; then + usage + exit 0 +fi + +if [[ ! ${in_file} ]]; then + echo "${script_name}: ERROR: Must provide --in-file option." >&2 + usage + exit 1 +fi + +check_file "${in_file}" + +on_exit() { + local result=${1} + + echo "${script_name}: ${result}" >&2 +} + +trap "on_exit 'Done, failed.'" EXIT + +LANG=C +LC_ALL=C + + +# systemctl show --no-pager +# log_level: debug info notice warning err crit alert emerg +# log_target: console journal kmsg journal-or-kmsg +# +# 'systemd.log_level=info ' => 'systemd.log_level=debug' +# 'systemd.log_target=journal-or-kmsg' => 'systemd.log_target=console' +# systemd.journald.forward_to_console=1 + + +# systemd args must match the CONFIG_CMDLINE entry in the kernel config fixup.spec file. + +args=( + 'systemd.log_level=info :systemd.log_level=debug' +# 'systemd.log_target=journal-or-kmsg:systemd.log_target=console ' +) + +tmp_file=${out_file}.tmp +rm -f ${out_file} ${tmp_file} +cp -vf ${in_file} ${tmp_file} + +for pair in "${args[@]}"; do + echo "pair: @${pair}@" >&2 + + unset old_txt new_txt + + old_txt=${pair%:*} + new_txt=${pair#*:} + + echo " old_txt: @${old_txt}@" >&2 + echo " new_txt: @${new_txt}@" >&2 + + set +e + old=$(eval "egrep --text --only-matching --max-count=1 '${old_txt}' ${tmp_file}") + result=${?} + + if [[ ${result} -ne 0 ]]; then + echo "${script_name}: ERROR: Kernel command line arg not found: '${old_txt}'." >&2 + echo "Kernel strings:" >&2 + egrep --text 'systemd.' ${in_file} >&2 + egrep --text --max-count=1 'chosen.*bootargs' ${in_file} >&2 + exit 1 + fi + set -e + + sed --in-place "{s/${old_txt}/${new_txt}/g}" ${tmp_file} + + if [[ ${verbose} ]]; then + eval "egrep --text '${new_txt}' ${tmp_file}" >&2 + fi +done + +cp -vf ${tmp_file} ${out_file} +rm -f ${tmp_file} + +trap - EXIT + +echo "${script_name}: INFO: Output kernel: '${out_file}'" >&2 + +on_exit 'Done, success.' diff --git a/scripts/start-qemu.sh b/scripts/start-qemu.sh new file mode 100755 index 00000000..8cce2c62 --- /dev/null +++ b/scripts/start-qemu.sh @@ -0,0 +1,368 @@ +#!/usr/bin/env bash + +usage() { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "${script_name} - Run Linux kernel in QEMU." >&2 + echo "Usage: ${script_name} [flags]" >&2 + echo "Option flags:" >&2 + echo " -a --arch - Target architecture. Default: '${target_arch}'." >&2 + echo " -c --kernel-cmd - Kernel command line options. Default: '${kernel_cmd}'." >&2 + echo " -e --ether-mac - QEMU Ethernet MAC. Default: '${ether_mac}'." >&2 + echo " -f --hostfwd-offset - QEMU hostfwd port offset. Default: '${hostfwd_offset}'." >&2 + echo " -h --help - Show this help and exit." >&2 + echo " -i --initrd - Initrd image. Default: '${initrd}'." >&2 + echo " -k --kernel - Kernel image. Default: '${kernel}'." >&2 +# TODO echo " -m --modules - Kernel modules directory. 
To mount over existing modules directory. Default: '${modules}'." >&2 + echo " -o --out-file - stdout, stderr redirection file. Default: '${out_file}'." >&2 +# TODO echo " -r --disk-image - Raw disk image. Alternative to --initrd. Default: '${disk_image}'." >&2 + echo " -s --systemd-debug - Run systemd with debug options. Default: '${systemd_debug}'." >&2 + echo " -t --qemu-tap - Use QEMU tap networking. Default: '${qemu_tap}'." >&2 + echo " -v --verbose - Verbose execution." >&2 + echo " --hda - QEMU IDE hard disk image hda. Default: '${hda}'." >&2 + echo " --hdb - QEMU IDE hard disk image hdb. Default: '${hdb}'." >&2 + echo " --hdc - QEMU IDE hard disk image hdc. Default: '${hdc}'." >&2 + echo " --pid-file - QEMU IDE hard disk image hdb. Default: '${pid_file}'." >&2 + echo " --p9-share - Plan9 share directory. Default: '${p9_share}'." >&2 + eval "${old_xtrace}" +} + +process_opts() { + local short_opts="a:c:e:f:hi:k:m:o:r:stv" + local long_opts="arch:,kernel-cmd:,ether-mac:,hostfwd-offset:,help,initrd:,\ +kernel:,modules:,out-file:,disk-image:,systemd-debug,qemu-tap,verbose,\ +hda:,hdb:,hdc:,pid-file:,p9-share:" + + local opts + opts=$(getopt --options ${short_opts} --long ${long_opts} -n "${script_name}" -- "$@") + + if [ $? != 0 ]; then + echo "${script_name}: ERROR: Internal getopt" >&2 + exit 1 + fi + + eval set -- "${opts}" + + while true ; do + case "${1}" in + -a | --arch) + target_arch=$(get_arch "${2}") + shift 2 + ;; + -c | --kernel-cmd) + kernel_cmd="${2}" + shift 2 + ;; + -e | --ether-mac) + ether_mac="${2}" + shift 2 + ;; + -f | --hostfwd-offset) + hostfwd_offset="${2}" + shift 2 + ;; + -h | --help) + usage=1 + shift + ;; + -i | --initrd) + initrd="${2}" + shift 2 + ;; + -k | --kernel) + kernel="${2}" + shift 2 + ;; + -m | --modules) + modules="${2}" + shift 2 + ;; + -o | --out-file) + out_file="${2}" + shift 2 + ;; + -r | --disk-image) + disk_image="${2}" + shift 2 + ;; + -s | --systemd-debug) + systemd_debug=1 + shift + ;; + -t | --qemu-tap) + qemu_tap=1 + shift + ;; + -v | --verbose) + set -x + verbose=1 + shift + ;; + --hda) + hda="${2}" + shift 2 + ;; + --hdb) + hdb="${2}" + shift 2 + ;; + --hdc) + hdc="${2}" + shift 2 + ;; + --pid-file) + pid_file="${2}" + shift 2 + ;; + --p9-share) + p9_share="${2}" + shift 2 + ;; + --) + shift + break + ;; + *) + echo "${script_name}: ERROR: Internal opts: '${@}'" >&2 + exit 1 + ;; + esac + done +} + +setup_efi() { + local efi_code_src + local efi_vars_src + local efi_full_src + + case "${target_arch}" in + amd64) + efi_code_src="/usr/share/OVMF/OVMF_CODE.fd" + efi_vars_src="/usr/share/OVMF/OVMF_VARS.fd" + efi_full_src="/usr/share/ovmf/OVMF.fd" + ;; + arm64) + efi_code_src="/usr/share/AAVMF/AAVMF_CODE.fd" + efi_vars_src="/usr/share/AAVMF/AAVMF_VARS.fd" + efi_full_src="/usr/share/qemu-efi-aarch64/QEMU_EFI.fd" + ;; + esac + + efi_code="${efi_code_src}" + efi_vars="${target_arch}-EFI_VARS.fd" + + check_file ${efi_code_src} + check_file ${efi_vars_src} + + copy_file ${efi_vars_src} ${efi_vars} +} + +on_exit() { + local result=${1} + + echo "${script_name}: Done: ${result}" >&2 + exit ${result} +} + +#=============================================================================== +# program start +#=============================================================================== +script_name="${0##*/}" + +trap "on_exit 'failed.'" EXIT +set -e + +SCRIPTS_TOP=${SCRIPTS_TOP:-"$( cd "${BASH_SOURCE%/*}" && pwd )"} +source ${SCRIPTS_TOP}/lib/util.sh + +process_opts "${@}" + +TARGET_HOSTNAME=${TARGET_HOSTNAME:-"tdd-tester"} + 
+MODULES_ID=${MODULES_ID:-"kernel_modules"} +P9_SHARE_ID=${SHARE_ID:-"p9_share"} + +host_arch=$(get_arch "$(uname -m)") +target_arch=${target_arch:-"${host_arch}"} +hostfwd_offset=${hostfwd_offset:-"20000"} +ether_mac=${ether_mac:-"01:02:03:00:00:01"} + +if [[ ${systemd_debug} ]]; then + # FIXME: need to run set-systemd-debug.sh??? + kernel_cmd+=" systemd.log_level=debug systemd.log_target=console systemd.journald.forward_to_console=1" +fi + +if [[ -n "${usage}" ]]; then + usage + trap - EXIT + exit 0 +fi + +case ${target_arch} in +arm64|ppc*) + ;; +*) + echo "${script_name}: ERROR: Unsupported target arch '${target_arch}'." >&2 + exit 1 + ;; +esac + +if [[ ! ${kernel} ]]; then + echo "${script_name}: ERROR: Must provide --kernel option." >&2 + usage + exit 1 +fi + +check_file "${kernel}" + +if [[ ! ${initrd} && ! ${disk_image} ]]; then + echo "${script_name}: ERROR: Must provide --initrd or --disk-image option." >&2 + usage + exit 1 +fi + +if [[ ${initrd} ]]; then + check_file "${initrd}" +fi + +if [[ ${disk_image} ]]; then + check_file "${disk_image}" + disk_image_root="/dev/vda" +fi + +if [[ ${modules} ]]; then + check_directory "${modules}" +fi + + +qemu_args="-kernel ${kernel}" +qemu_append_args="${kernel_cmd}" + +case "${host_arch}--${target_arch}" in +amd64--amd64) + have_efi=1 + qemu_exe="qemu-system-x86_64" + qemu_args+=" -machine accel=kvm -cpu host -m 2048 -smp 2" + ;; +arm64--amd64) + have_efi=1 + qemu_exe="qemu-system-x86_64" + qemu_args+=" -machine pc-q35-2.8 -cpu kvm64 -m 2048 -smp 2" + ;; +amd64--arm64) + have_efi=1 + qemu_exe="qemu-system-aarch64" + #qemu_mem=${qemu_mem:-5120} # 5G + qemu_mem=${qemu_mem:-6144} # 6G + #qemu_mem=${qemu_mem:-16384} # 16G + qemu_args+=" -machine virt,gic-version=3 -cpu cortex-a57 -m ${qemu_mem} -smp 2" + ;; +arm64--arm64) + have_efi=1 + qemu_exe="qemu-system-aarch64" + qemu_args+=" -machine virt,gic-version=3,accel=kvm -cpu host -m 4096 -smp 2" + ;; +amd64--ppc*) + unset have_efi + qemu_exe="qemu-system-ppc64" + #qemu_args+=" -machine cap-htm=off -m 2048" + qemu_args+=" -machine pseries,cap-htm=off -m 2048" + ;; +*) + echo "${script_name}: ERROR: Unsupported host--target combo: '${host_arch}--${target_arch}'." >&2 + exit 1 + ;; +esac + +nic_model=${nic_model:-"virtio-net-pci"} + +if [[ ${qemu_tap} ]]; then + qemu_args+=" \ + -netdev tap,id=tap0,ifname=qemu0,br=br0 \ + -device ${nic_model},netdev=tap0,mac=${ether_mac} \ + " +else + ssh_fwd=$(( ${hostfwd_offset} + 22 )) + echo "${script_name}: SSH fwd = ${ssh_fwd}" >&2 + + # FIXME: When is -nic unsupported? 
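# Worked expansion (assumed defaults: hostfwd_offset=20000 -> ssh_fwd=20022,
# nic_model=virtio-net-pci, TARGET_HOSTNAME=tdd-tester); the two branches
# below then produce roughly these QEMU arguments:
#   -nic user,model=virtio-net-pci,hostfwd=tcp::20022-:22,hostname=tdd-tester
# or, with use_virtio_net set:
#   -netdev user,id=eth0,hostfwd=tcp::20022-:22,hostname=tdd-tester
#   -device virtio-net-pci,netdev=eth0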
+ if [[ ${use_virtio_net} ]]; then + #virtio_net_type="virtio-net-device" + virtio_net_type="virtio-net-pci" + qemu_args+=" -netdev user,id=eth0,hostfwd=tcp::${ssh_fwd}-:22,hostname=${TARGET_HOSTNAME}" + qemu_args+=" -device ${virtio_net_type},netdev=eth0" + else + qemu_args+=" -nic user,model=${nic_model},hostfwd=tcp::${ssh_fwd}-:22,hostname=${TARGET_HOSTNAME}" + fi +fi + +if [[ ${initrd} ]]; then + qemu_args+=" -initrd ${initrd}" +fi + +if [[ ${hda} ]]; then + qemu_args+=" -hda ${hda}" +fi + +if [[ ${hdb} ]]; then + qemu_args+=" -hdb ${hdb}" +fi + +if [[ ${hdc} ]]; then + qemu_args+=" -hdc ${hdc}" +fi + +if [[ ${p9_share} ]]; then + check_directory "${p9_share}" + qemu_args+=" \ + -virtfs local,id=${P9_SHARE_ID},path=${p9_share},security_model=none,mount_tag=${P9_SHARE_ID}" + echo "${script_name}: INFO: 'mount -t 9p -o trans=virtio ${P9_SHARE_ID} -oversion=9p2000.L'" >&2 +fi + +if [[ ${modules} ]]; then + qemu_args+=" \ + -virtfs local,id=${MODULES_ID},path=${modules},security_model=none,mount_tag=${MODULES_ID}" +fi + +if [[ ${disk_image} ]]; then # TODO + qemu_args+=" \ + -drive if=none,id=blk,file=${disk_image} \ + -device virtio-blk-device,drive=blk \ + " + qemu_append_args+=" root=${disk_image_root} rw" +fi + +if [[ ${out_file} ]]; then + qemu_args+=" \ + -display none \ + -chardev file,id=char0,path=${out_file} \ + -serial chardev:char0 \ + " +else + qemu_args+=" -nographic" +fi + +if [[ ${pid_file} ]]; then + qemu_args+=" -pidfile ${pid_file}" +fi + +if [[ ${have_efi} ]]; then + setup_efi + qemu_args+=" -drive if=pflash,file=${efi_code},format=raw,readonly" + qemu_args+=" -drive if=pflash,file=${efi_vars},format=raw" +fi + +ls -l /dev/kvm || : +cat /etc/group || : +id + +cmd="${qemu_exe} \ + -name tdd-vm \ + -object rng-random,filename=/dev/urandom,id=rng0 \ + -device virtio-rng-pci,rng=rng0 \ + ${qemu_args} \ + -append '${qemu_append_args}' \ +" + +eval exec "${cmd}" diff --git a/scripts/targets/arm64/tx2/set-numcores.sh b/scripts/targets/arm64/tx2/set-numcores.sh new file mode 120000 index 00000000..2440cadd --- /dev/null +++ b/scripts/targets/arm64/tx2/set-numcores.sh @@ -0,0 +1 @@ +set-smt.sh \ No newline at end of file diff --git a/scripts/targets/arm64/tx2/set-smt.sh b/scripts/targets/arm64/tx2/set-smt.sh new file mode 100755 index 00000000..15529105 --- /dev/null +++ b/scripts/targets/arm64/tx2/set-smt.sh @@ -0,0 +1,280 @@ +#!/usr/bin/env bash + +usage() { + local op_name + local op_values + local old_xtrace + + op_name=$(op_get_name) + op_values=$(op_get_values) + + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "${script_name} - Set or display ThunderX2 ${op_name} value." >&2 + echo "Usage: ${script_name} [flags]" >&2 + echo "Option flags:" >&2 + echo " -h --help - Show this help and exit." >&2 + echo " -s --set - Set value {${op_values}}. Default: '${set_value}'." >&2 + echo " -v --verbose - Verbose execution." 
>&2 + eval "${old_xtrace}" +} + +process_opts() { + local short_opts="hs:v" + local long_opts="help,set:,verbose" + + local opts + opts=$(getopt --options ${short_opts} --long ${long_opts} -n "${script_name}" -- "$@") + + eval set -- "${opts}" + + while true ; do + #echo "${FUNCNAME[0]}: @${1}@ @${2}@" + case "${1}" in + -h | --help) + usage=1 + shift + ;; + -s | --set) + set_value="${2}" + shift 2 + ;; + -v | --verbose) + set -x + verbose=1 + shift + ;; + --) + shift + if [[ ${1} ]]; then + echo "${script_name}: ERROR: Extra args found: '${@}'" >&2 + usage=1 + fi + break + ;; + *) + echo "${script_name}: ERROR: Internal opts: '${@}'" >&2 + exit 1 + ;; + esac + done +} + +on_exit() { + local result=${1} + + echo "${script_name}: Done: ${result}" >&2 + exit 0 +} + +print_efi_var() { + local efi_file=${1} + + echo -n '0x' + cat "${efi_file}" | hexdump -s 4 -v -e '/4 "%02X\n"' +} + +set_efi_var() { + local efi_file=${1} + local value_hex=${2} + local immutable + + if lsattr -l "${efi_file}" | egrep 'Immutable' >/dev/null; then + immutable=1 + chattr -i "${efi_file}" + fi + + echo -n -e "\x00\x00\x00\x00\x${value_hex#0x}\x00\x00\x00" > "${efi_file}" + + if [[ ${immutable} ]]; then + chattr +i "${efi_file}" + fi +} + +print_value() { + local msg=${1} + local value_hex=${2} + local value_dec + + value_dec=$(( 16#${value_hex#0x} )) + echo "${msg}: ${value_hex} (${value_dec})" >&2 +} + +op_print_all() { + local op + local array + + for op in "smt turbo numcores"; do + #echo "${LINENO}: op = '${op}'" >&2 + array="${op}_ops"[@] + array=( "${!array}" ) + echo "${LINENO}: op = '${array[name_index]}', values = '${array[values_index]}', file = '${array[file_index]}'" >&2 + done +} + +op_get_name() { + echo "${op_array[name_index]}" +} + +op_get_type() { + echo "${op_array[type_index]}" +} + +op_get_values() { + echo "${op_array[values_index]}" +} + +op_get_file() { + echo "${op_array[file_index]}" +} + +op_check_value() { + local value_hex=${1} + local type=$(op_get_type) + + case ${type} in + range) + local range=$(op_get_values) + local min=${range%-*} + local max=${range#*-} + + if [[ ${value_hex} -ge ${min} && ${value_hex} -le ${max} ]]; then + return + fi + ;; + set) + local v + for v in $(op_get_values); do + #echo "v = '${v}'" >&2 + if [[ ${v} -eq ${value_hex} ]]; then + return + fi + done + ;; + *) + echo "${script_name}: ERROR: Internal type: ${type}" >&2 + exit 1 + ;; + esac + + echo "${script_name}: ERROR: Bad set value: '${value_hex}'" >&2 + usage + exit 1 +} + +dmidecode_cpu_count() { + if [[ ${dmidecode} ]]; then + echo -n "DMI: " + ${dmidecode} -t processor | egrep --ignore-case --max-count=1 \ + --only-matching 'Core Count: [[:digit:]]{1,3}' + echo -n "DMI: " + ${dmidecode} -t processor | egrep --ignore-case --max-count=1 \ + --only-matching 'Core Enabled: [[:digit:]]{1,3}' + fi +} + +#=============================================================================== +# program start +#=============================================================================== +PS4='\[\033[0;33m\]+ ${BASH_SOURCE##*/}:${LINENO}:(${FUNCNAME[0]:-"?"}): \[\033[0;37m\]' + +script_name="${0##*/}" + +trap "on_exit 'failed.'" EXIT +set -e + +process_opts "${@}" + +host_arch="$(uname -m)" + +efi_guid='a9f76944-9749-11e7-96d5-736f2e5d4e7e' + +# name type values file +smt_ops=( SMT set '1 2 4' CvmHyperThread-${efi_guid}) +turbo_ops=( TURBO set '0 1 2' CvmTurbo-${efi_guid}) +numcores_ops=( NUMCORES range '1-0x1E' CvmNumCores-${efi_guid}) + +name_index=0 +type_index=1 +values_index=2 +file_index=3 + +#op_print_all + 
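# Name-based dispatch sketch: the script is also installed as the
# set-turbo.sh and set-numcores.sh symlinks, and the op derived from the
# invoking name just below selects a row of the table above (mapping shown
# here for illustration only):
#   set-smt.sh      -> op='smt'      -> smt_ops      -> CvmHyperThread-<guid>
#   set-turbo.sh    -> op='turbo'    -> turbo_ops    -> CvmTurbo-<guid>
#   set-numcores.sh -> op='numcores' -> numcores_ops -> CvmNumCores-<guid>
# e.g. (typically as root): ./set-turbo.sh --set=1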
+op=${script_name%%.*} +op=${op##*-} + +op_array="${op}_ops"[@] +op_array=( "${!op_array}" ) + +#echo "${LINENO}: op = '${op_array[name_index]}', values = '${op_array[values_index]}', file = '${op_array[file_index]}'" >&2 + +if [[ -x "$(command -v dmidecode)" ]]; then + dmidecode="dmidecode" +fi + +if [[ ${set_value} ]]; then + set_value=$(printf "0x%X" ${set_value}) +fi + +efi_dir="/sys/firmware/efi/efivars" +efi_file="${efi_dir}/$(op_get_file)" + +if [[ ${usage} ]]; then + usage + trap - EXIT + exit 0 +fi + +if [[ "${host_arch}" != "aarch64" ]]; then + echo "${script_name}: ERROR: For ARM64 machines only." >&2 + exit 1 +fi + +if [[ ${dmidecode} ]]; then + if ! ${dmidecode} -s processor-version | egrep -i 'ThunderX' > /dev/null; then + echo "${script_name}: ERROR: For ThunderX machines only." >&2 + exit 1 + fi +fi + +if [[ ! -d ${efi_dir} ]]; then + mount | egrep efivars || : + if [[ ${?} ]]; then + echo "${script_name}: ERROR: efivars file system not mounted: '${efi_dir}'" >&2 + else + echo "${script_name}: ERROR: Directory not found: '${efi_dir}'" >&2 + fi + exit 1 +fi + +if [[ ! -f ${efi_file} ]]; then + echo "${script_name}: ERROR: File not found: '${efi_file}'" >&2 + echo "${script_name}: Check firmware version" >&2 + if [[ ${dmidecode} ]]; then + ${dmidecode} -s bios-version + fi + exit 1 +fi + +if [[ ${op} == "numcores" ]]; then + dmidecode_cpu_count +fi + +cur_value=$(print_efi_var ${efi_file}) +print_value "current" ${cur_value} + +if [[ ${set_value} ]]; then + print_value "set" ${set_value} + op_check_value ${set_value} + + if [[ ${cur_value} -eq ${set_value} ]]; then + echo "${script_name}: INFO: Set value same as current value: '${cur_value}'" >&2 + else + set_efi_var ${efi_file} ${set_value} + new_value=$(print_efi_var ${efi_file}) + print_value "new" ${new_value} + fi +fi + +trap "on_exit 'Success.'" EXIT diff --git a/scripts/targets/arm64/tx2/set-turbo.sh b/scripts/targets/arm64/tx2/set-turbo.sh new file mode 120000 index 00000000..2440cadd --- /dev/null +++ b/scripts/targets/arm64/tx2/set-turbo.sh @@ -0,0 +1 @@ +set-smt.sh \ No newline at end of file diff --git a/scripts/targets/arm64/tx2/tx2-fixup.spec b/scripts/targets/arm64/tx2/tx2-fixup.spec new file mode 100644 index 00000000..a797dc26 --- /dev/null +++ b/scripts/targets/arm64/tx2/tx2-fixup.spec @@ -0,0 +1,34 @@ +# Kernel config fixups for Cavium ThunderX2 (TX2) systems. + +CONFIG_I2C_THUNDERX=m +CONFIG_MDIO_THUNDER=m +CONFIG_SPI_THUNDERX=m +CONFIG_THUNDER_NIC_VF=m +CONFIG_THUNDER_NIC_BGX=m +CONFIG_THUNDER_NIC_RGX=m + +CONFIG_MODULE_SIG_KEY="" +CONFIG_SYSTEM_TRUSTED_KEYS="" + +# Reserve space for a full relay triple: xxx.xxx.xxx.xxx:xxxxx:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +CONFIG_CMDLINE="platform_args initrd=tdd-initrd tdd_relay_triple=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxz systemd.log_level=info systemd.log_target=console systemd.journald.forward_to_console=1 " +CONFIG_CMDLINE_FORCE=y +CONFIG_INITRAMFS_FORCE=n + +# Ethernet drivers +CONFIG_QED=m +CONFIG_QED_SRIOV=y +CONFIG_QEDE=m + +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y + +# storage drivers +CONFIG_RAID_ATTRS=m +#CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 + +# For QEMU testing +CONFIG_HW_RANDOM_VIRTIO=m diff --git a/scripts/targets/generic-fixup.spec b/scripts/targets/generic-fixup.spec new file mode 100644 index 00000000..426c80ef --- /dev/null +++ b/scripts/targets/generic-fixup.spec @@ -0,0 +1,13 @@ +# Kernel config fixups for generic systems. 
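The oversized tdd_relay_triple=xxx...z value in CONFIG_CMDLINE (in this generic spec and in the TX2 spec above) reserves a fixed-width slot in the built kernel image; set-relay-triple.sh later rewrites that slot in place, space-padding the real triple so the image size and string offsets do not change. A rough sketch of that substitution, with made-up file names and triple:

old='tdd_relay_triple=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxz'
new='tdd_relay_triple=10.0.0.1:9600:01234567-89ab-cdef-0123-456789abcdef'
# pad with spaces so the replacement is exactly as long as the placeholder
pad="$(printf '%0.*s' $(( ${#old} - ${#new} )) '                                ')"
sed "s/${old}/${new}${pad}/g" Image > Image.test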
+ +CONFIG_MODULE_SIG_KEY="" +CONFIG_SYSTEM_TRUSTED_KEYS="" + +# Reserve space for a full relay triple: xxx.xxx.xxx.xxx:xxxxx:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE="platform_args initrd=tdd-initrd tdd_relay_triple=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxz systemd.log_level=info systemd.log_target=console systemd.journald.forward_to_console=1 " +CONFIG_CMDLINE_FORCE=y +CONFIG_INITRAMFS_FORCE=n + +# For QEMU testing +CONFIG_HW_RANDOM_VIRTIO=m diff --git a/scripts/tdd-bisect.sh b/scripts/tdd-bisect.sh new file mode 100755 index 00000000..5087b0f9 --- /dev/null +++ b/scripts/tdd-bisect.sh @@ -0,0 +1,107 @@ +#!/usr/bin/env bash + +usage() { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "${script_name} - Runs TDD via git bisect." >&2 + echo "Usage: ${script_name} [flags]" >&2 + echo "Option flags:" >&2 + echo " -h --help - Show this help and exit." >&2 + echo " -v --verbose - Verbose execution." >&2 + echo " -g --good - git bisect good revision. Default: '${good_rev}'." >&2 + echo " -b --bad - git bisect bad revision. Default: '${bad_rev}'." >&2 + echo " -a --all-steps - Run all steps." >&2 + echo " -t --test-only - Only run --run-tests step." >&2 + eval "${old_xtrace}" +} + +process_opts() { + local short_opts="hvgbat" + local long_opts="help,verbose,good:,bad:,all-steps,test-only" + + local opts + opts=$(getopt --options ${short_opts} --long ${long_opts} -n "${script_name}" -- "$@") + + eval set -- "${opts}" + + while true ; do + #echo "${FUNCNAME[0]}: @${1}@ @${2}@" + case "${1}" in + -h | --help) + usage=1 + shift + ;; + -v | --verbose) + set -x + verbose=1 + shift + ;; + -g | --good) + good_rev="${2}" + shift 2 + ;; + -b | --bad) + bad_rev="${2}" + shift 2 + ;; + -a | --all-steps) + all_steps=1 + shift + ;; + -t | --test-only) + test_only=1 + shift + ;; + --) + shift + break + ;; + *) + echo "${script_name}: ERROR: Internal opts: '${@}'" >&2 + exit 1 + ;; + esac + done +} + +on_exit() { + local result=${1} + + echo "${script_name}: Done: ${result}" >&2 +} + +#=============================================================================== +# program start +#=============================================================================== +export PS4='\[\e[0;33m\]+ ${BASH_SOURCE##*/}:${LINENO}:(${FUNCNAME[0]:-"?"}):\[\e[0m\] ' +set -x + +script_name="${0##*/}" + +trap "on_exit 'failed.'" EXIT + +process_opts "${@}" + +if [[ ${usage} ]]; then + usage + trap - EXIT + exit 0 +fi + +sudo="sudo -S" + +if [[ ${all_steps} ]]; then + steps="-12345" +elif [[ ${test_only} ]]; then + steps="-5" +else + steps="-135" +fi + +${sudo} true + +echo "${script_name}: TODO" >&2 + +trap "on_exit 'Success.'" EXIT +exit 0 diff --git a/scripts/tdd-run.conf b/scripts/tdd-run.conf new file mode 100644 index 00000000..9d23d82e --- /dev/null +++ b/scripts/tdd-run.conf @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +TDD_PROJECT_ROOT="${TDD_PROJECT_ROOT:-${HOME}/projects/tdd/git/tdd-project}" +TDD_TEST_ROOT="${TDD_TEST_ROOT:-${HOME}/projects/tdd/tdd--test}" + +TDD_CHECKOUT_SERVER="${TDD_CHECKOUT_SERVER:-relay.tdd.net}" +TDD_RELAY_SERVER="${TDD_RELAY_SERVER:-relay.tdd.net}" +TDD_TFTP_SERVER="${TDD_TFTP_SERVER:-tftp.tdd.net}" + +TDD_TARGET_LIST="${TDD_TARGET_LIST:-gbt2s17.tdd.net gbt2s18.tdd.net gbt2s19.tdd.net zabla15.tdd.net}" +TDD_TARGET_BMC_LIST="${TDD_TARGET_BMC_LIST:-gbt2s17-bmc.tdd.net gbt2s18-bmc.tdd.net gbt2s19-bmc.tdd.net zabla15-bmc.tdd.net}" + +export TDD_CHECKOUT_SERVER +export TDD_RELAY_SERVER +export TDD_TFTP_SERVER +export 
TDD_TARGET_LIST +export TDD_TARGET_BMC_LIST +export TDD_PROJECT_ROOT +export TDD_TEST_ROOT diff --git a/scripts/tdd-run.sh b/scripts/tdd-run.sh new file mode 100755 index 00000000..0c30cad7 --- /dev/null +++ b/scripts/tdd-run.sh @@ -0,0 +1,714 @@ +#!/usr/bin/env bash + +usage() { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "${script_name} - Builds TDD container image, Linux kernel, root file system images, runs test suites." >&2 + echo "Usage: ${script_name} [flags]" >&2 + echo "Option flags:" >&2 + echo " --arch - Target architecture. Default: ${target_arch}." >&2 + echo " -a --help-all - Show test help and exit." >&2 + echo " -c --config-file - ${script_name} config file. Default: '${config_file}'." >&2 + echo " -h --help - Show this help and exit." >&2 + echo " -v --verbose - Verbose execution." >&2 + echo " --build-name - Build name. Default: '${build_name}'." >&2 + echo " --linux-config - URL of an alternate kernel config file. Default: '${linux_config}'." >&2 + echo " --linux-source - Linux kernel source tree path. Default: '${linux_source}'." >&2 + echo " --linux-repo - Linux kernel git repository URL. Default: '${linux_repo}'." >&2 + echo " --linux-branch - Linux kernel git repository branch. Default: '${linux_branch}'." >&2 + echo " --linux-src-dir - Linux kernel git repository path. Default: '${linux_src_dir}'." >&2 + echo " --test-machine - Test machine name {$(clean_ws ${TDD_TARGET_LIST}) qemu}. Default: '${test_machine}'." >&2 + echo " --systemd-debug - Run systemd with debug options. Default: '${systemd_debug}'." >&2 + echo " --rootfs-types - Rootfs types to build {$(clean_ws ${known_rootfs_types}) all}. Default: '${rootfs_types}'." >&2 + echo " --test-types - Test types to run {$(clean_ws ${known_test_types}) all}. Default: '${test_types}'." >&2 + echo " --hostfwd-offset - QEMU hostfwd port offset. Default: '${hostfwd_offset}'." >&2 + echo "Option steps:" >&2 + echo " --enter - Enter container, no builds." >&2 + echo " -1 --build-kernel - Build kernel." >&2 + echo " -2 --build-bootstrap - Build rootfs bootstrap." >&2 + echo " -3 --build-rootfs - Build rootfs." >&2 + echo " -4 --build-tests - Build tests." >&2 + echo " -5 --run-tests - Run tests on test machine '${test_machine}'." >&2 + echo "Environment:" >&2 + echo " TDD_PROJECT_ROOT - Default: '${TDD_PROJECT_ROOT}'." >&2 + echo " TDD_TEST_ROOT - Default: '${TDD_TEST_ROOT}'." >&2 + echo " TDD_CHECKOUT_SERVER - Default: '${TDD_CHECKOUT_SERVER}'." >&2 + echo " TDD_RELAY_SERVER - Default: '${TDD_RELAY_SERVER}'." >&2 + echo " TDD_TFTP_SERVER - Default: '${TDD_TFTP_SERVER}'." >&2 + echo " TDD_HISTFILE - Default: '${TDD_HISTFILE}'." 
>&2 + eval "${old_xtrace}" +} + +test_usage() { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "Test Plugin Info:" >&2 + for test in ${known_test_types}; do + test_usage_${test/-/_} + echo "" >&2 + done + eval "${old_xtrace}" +} + +process_opts() { + local short_opts="ac:hv12345" + local long_opts="\ +arch:,help-all,config-file:,help,verbose,\ +build-name:,\ +linux-config:,linux-source:,linux-repo:,linux-branch:,linux-src-dir:,\ +test-machine:,systemd-debug,rootfs-types:,test-types:,hostfwd-offset:,\ +enter,build-kernel,build-bootstrap,build-rootfs,build-tests,run-tests" + + local opts + opts=$(getopt --options ${short_opts} --long ${long_opts} -n "${script_name}" -- "$@") + + eval set -- "${opts}" + + while true ; do + #echo "${FUNCNAME[0]}: @${1}@ @${2}@" + case "${1}" in + --arch) + target_arch=$(get_arch "${2}") + shift 2 + ;; + -a | --help-all) + help_all=1 + shift + ;; + -c | --config-file) + config_file="${2}" + shift 2 + ;; + -h | --help) + usage=1 + shift + ;; + -v | --verbose) + set -x + verbose=1 + shift + ;; + --build-name) + build_name="${2}" + shift 2 + ;; + --linux-config) + linux_config="${2}" + shift 2 + ;; + --linux-source) + linux_source="${2}" + shift 2 + ;; + --linux-repo) + linux_repo="${2}" + shift 2 + ;; + --linux-branch) + linux_branch="${2}" + shift 2 + ;; + --linux-src-dir) + linux_src_dir="${2}" + shift 2 + ;; + --test-machine) + test_machine="${2}" + shift 2 + ;; + --systemd-debug) + systemd_debug=1 + shift + ;; + --rootfs-types) + rootfs_types="${2}" + shift 2 + ;; + --test-types) + test_types="${2}" + shift 2 + ;; + --hostfwd-offset) + hostfwd_offset="${2}" + shift 2 + ;; + --enter) + step_enter=1 + shift + ;; + -1 | --build-kernel) + step_build_kernel=1 + shift + ;; + -2 | --build-bootstrap) + step_build_bootstrap=1 + shift + ;; + -3 | --build-rootfs) + step_build_rootfs=1 + shift + ;; + -4 | --build-tests) + step_build_tests=1 + shift + ;; + -5 | --run-tests) + step_run_tests=1 + shift + ;; + --) + shift + break + ;; + *) + echo "${script_name}: ERROR: Internal opts: '${@}'" >&2 + exit 1 + ;; + esac + done +} + +on_exit() { + local result=${1} + + if [[ -d ${image_dir} ]]; then + ${sudo} chown -R $(id --user --real --name): ${image_dir} + fi + + local end_time="$(date)" + local end_sec="${SECONDS}" + local end_min + if test -x "$(command -v bc)"; then + end_min="$(bc <<< "scale=2; ${end_sec} / 60")" + else + end_min="$((end_sec / 60)).$(((end_sec * 100) / 60))" + fi + + set +x + echo "${script_name}: start time: ${start_time}" >&2 + echo "${script_name}: end time: ${end_time}" >&2 + echo "${script_name}: duration: ${end_sec} seconds (${end_min} min)" >&2 + echo "${script_name}: Done: ${result}" >&2 +} + +check_machine() { + local machine=${1} + + if [[ ${machine} == "qemu" ]]; then + return + fi + + set +e + ${SCRIPTS_TOP}/checkout-query.sh -v ${machine} + result=${?} + set -e + + if [[ ${result} -eq 0 ]]; then + return + elif [[ ${result} -eq 1 ]]; then + echo "${script_name}: ERROR: unknown machine: '${machine}'" >&2 + usage + exit 1 + fi + exit ${result} +} + +check_rootfs_types() { + local given + local known + local found + local all + + for given in ${rootfs_types}; do + found="n" + if [[ "${given}" == "all" ]]; then + all=1 + continue + fi + for known in ${known_rootfs_types}; do + if [[ "${given}" == "${known}" ]]; then + found="y" + break + fi + done + if [[ "${found}" != "y" ]]; then + echo "${script_name}: ERROR: Unknown rootfs-type '${given}'." >&2 + exit 1 + fi + #echo "${FUNCNAME[0]}: Found '${given}'." 
>&2 + done + + if [[ ${all} ]]; then + rootfs_types="$(clean_ws ${known_rootfs_types})" + fi +} + +check_test_types() { + local given + local known + local found + local all + + for given in ${test_types}; do + found="n" + if [[ "${given}" == "all" ]]; then + all=1 + continue + fi + for known in ${known_test_types}; do + if [[ "${given}" == "${known}" ]]; then + found="y" + break + fi + done + if [[ "${found}" != "y" ]]; then + echo "${script_name}: ERROR: Unknown test-type '${given}'." >&2 + usage + exit 1 + fi + #echo "${FUNCNAME[0]}: Found '${given}'." >&2 + done + + if [[ ${all} ]]; then + test_types="$(clean_ws ${known_test_types})" + fi +} + +build_kernel_from_src() { + local config=${1} + local fixup_spec=${2} + local platform_args=${3} + local src_dir=${4} + local build_dir=${5} + local install_dir=${6} + + rm -rf ${build_dir} ${install_dir} + + ${SCRIPTS_TOP}/build-linux-kernel.sh \ + --build-dir=${build_dir} \ + --install-dir=${install_dir} \ + ${target_arch} ${src_dir} defconfig + + if [[ ${config} != 'defconfig' ]]; then + if [[ -f ${config} ]]; then + cp -vf ${config} ${build_dir}/.config + else + curl --silent --show-error --location ${config} \ + > ${build_dir}/.config + fi + fi + + bash -x ${SCRIPTS_TOP}/set-config-opts.sh \ + --verbose \ + ${platform_args:+"--platform-args='${platform_args}'"} \ + ${fixup_spec} ${build_dir}/.config + + bash -x ${SCRIPTS_TOP}/build-linux-kernel.sh \ + --build-dir=${build_dir} \ + --install-dir=${install_dir} \ + ${verbose:+--verbose} \ + ${target_arch} ${src_dir} all +} + +build_kernel_from_repo() { + local repo=${1} + local branch=${2} + local config=${3} + local fixup_spec=${4} + local platform_args=${5} + local src_dir=${6} + local build_dir=${7} + local install_dir=${8} + + git_checkout_safe ${src_dir} ${repo} ${branch} + + build_kernel_from_src \ + "${config}" \ + "${fixup_spec}" \ + "${platform_args}" \ + "${src_dir}" \ + "${build_dir}" \ + "${install_dir}" +} + +build_kernel_with_initrd() { + local src_dir=${1} + local build_dir=${2} + local install_dir=${3} + local image_dir=${4} + + check_file ${image_dir}/initrd + ln -sf ./initrd ${image_dir}/initrd.cpio + + #export make_options_user="CONFIG_INITRAMFS_SOURCE=${image_dir}/initrd.cpio" + + make_options_user="CONFIG_INITRAMFS_SOURCE=${image_dir}/initrd.cpio" ${SCRIPTS_TOP}/build-linux-kernel.sh \ + --build-dir=${build_dir} \ + --install-dir=${install_dir} \ + ${verbose:+--verbose} \ + ${target_arch} ${src_dir} Image.gz +} + +build_bootstrap() { + local rootfs_type=${1} + local bootstrap_dir=${2} + + ${sudo} rm -rf ${bootstrap_dir} + + ${SCRIPTS_TOP}/build-rootfs.sh \ + --arch=${target_arch} \ + --rootfs-type=${rootfs_type} \ + --bootstrap-dir="${bootstrap_dir}" \ + --image-dir="NA" \ + --bootstrap \ + --verbose +} + +build_rootfs() { + local rootfs_type=${1} + local test_name=${2} + local bootstrap_dir=${3} + local image_dir=${4} + local kernel_dir=${5} + + check_directory "${bootstrap_dir}" + check_directory "${kernel_dir}" + + rm -rf ${image_dir} + mkdir -p ${image_dir} + + local modules + modules="$(find ${kernel_dir}/lib/modules/* -maxdepth 0 -type d)" + check_directory "${modules}" + + local extra_packages + extra_packages+="$(test_packages_${test_name//-/_} ${rootfs_type} ${target_arch})" + + ${SCRIPTS_TOP}/build-rootfs.sh \ + --arch=${target_arch} \ + --rootfs-type=${rootfs_type} \ + --bootstrap-dir="${bootstrap_dir}" \ + --image-dir=${image_dir} \ + --kernel-modules="${modules}" \ + --extra-packages="${extra_packages}" \ + --rootfs-setup \ + --make-image \ + --verbose + + 
test_setup_${test_name//-/_} ${rootfs_type} ${image_dir}/rootfs +} + +create_sysroot() { + local rootfs_type=${1} + local rootfs=${2} + local sysroot=${3} + + check_directory "${rootfs}" + + mkdir -p ${sysroot} + ${sudo} rsync -a --delete ${rootfs}/ ${sysroot}/ + ${sudo} chown $(id --user --real --name): ${sysroot} + + ${SCRIPTS_TOP}/prepare-sysroot.sh ${sysroot} +} + +build_tests() { + local rootfs_type=${1} + local test_name=${2} + local tests_dir=${3} + local sysroot=${4} + local kernel_src=${5} + + check_directory "${sysroot}" + check_directory "${kernel_src}" + + test_build_${test_name//-/_} ${rootfs_type} ${tests_dir} ${sysroot} \ + ${kernel_src} +} + +run_tests() { + local kernel=${1} + local image_dir=${2} + local tests_dir=${3} + local results_dir=${4} + + echo "${script_name}: run_tests: ${test_machine}" >&2 + + check_file ${kernel} + check_directory ${image_dir} + check_file ${image_dir}/initrd + check_file ${image_dir}/login-key + check_directory ${tests_dir} + + local test_script + local extra_args + + if [[ ${test_machine} == 'qemu' ]]; then + test_script="${SCRIPTS_TOP}/run-kernel-qemu-tests.sh" + extra_args+=" --arch=${target_arch} ${hostfwd_offset:+--hostfwd-offset=${hostfwd_offset}}" + else + test_script="${SCRIPTS_TOP}/run-kernel-remote-tests.sh" + extra_args+=" --test-machine=${test_machine}" + fi + + if [[ ${systemd_debug} ]]; then + extra_args+=" --systemd-debug" + fi + + bash -x ${test_script} \ + --kernel=${kernel} \ + --initrd=${image_dir}/initrd \ + --ssh-login-key=${image_dir}/login-key \ + --test-name=${test_name} \ + --tests-dir=${tests_dir} \ + --out-file=${results_dir}/${test_machine}-console.txt \ + --result-file=${results_dir}/${test_machine}-result.txt \ + ${extra_args} \ + --verbose +} + +#=============================================================================== +# program start +#=============================================================================== +export PS4='\[\e[0;33m\]+ ${BASH_SOURCE##*/}:${LINENO}:(${FUNCNAME[0]:-"?"}):\[\e[0m\] ' +script_name="${0##*/}" + +trap "on_exit '[setup] failed.'" EXIT +set -e + +SCRIPTS_TOP=${SCRIPTS_TOP:-"$(cd "${BASH_SOURCE%/*}" && pwd)"} +DOCKER_TOP=${DOCKER_TOP:-"$(cd "${SCRIPTS_TOP}/../docker" && pwd)"} + +source ${SCRIPTS_TOP}/lib/util.sh +source ${SCRIPTS_TOP}/rootfs-plugin/rootfs-plugin.sh +source ${SCRIPTS_TOP}/test-plugin/test-plugin.sh + +for test in ${known_test_types}; do + if [[ -f ${SCRIPTS_TOP}/test-plugin/${test}/${test}.sh ]]; then + source ${SCRIPTS_TOP}/test-plugin/${test}/${test}.sh + else + echo "${script_name}: ERROR: Test plugin '${test}.sh' not found." >&2 + exit 1 + fi +done + +set -x + +sudo="sudo -S" +parent_ops="$@" + +start_time="$(date)" +SECONDS=0 + +process_opts "${@}" + +config_file="${config_file:-${SCRIPTS_TOP}/tdd-run.conf}" +check_file ${config_file} " --config-file" "usage" +source ${config_file} + +container_work_dir=${container_work_dir:-"/tdd--test"} + +test_machine=${test_machine:-"qemu"} +test_machine=${test_machine%-bmc} + +build_name=${build_name:-"${script_name%.*}-$(date +%m.%d)"} +target_arch=${target_arch:-"arm64"} +host_arch=$(get_arch "$(uname -m)") + +top_build_dir="$(pwd)/${build_name}" + +TDD_PROJECT_ROOT=${TDD_PROJECT_ROOT:-"$(cd ${SCRIPTS_TOP}/.. 
&& pwd)"} +TDD_TEST_ROOT=${TDD_TEST_ROOT:-"$(pwd)"} +TDD_HISTFILE=${TDD_HISTFILE:-"${container_work_dir}/${build_name}--bash_history"} + +rootfs_types=${rootfs_types:-"debian"} +rootfs_types="${rootfs_types//,/ }" + +test_types=${test_types:-"sys-info"} +test_types="${test_types//,/ }" + +linux_config=${linux_config:-"defconfig"} + +if [[ ${linux_source} ]]; then + check_not_opt 'linux-source' 'linux-repo' ${linux_repo} + check_not_opt 'linux-source' 'linux-branch' ${linux_branch} + check_not_opt 'linux-source' 'linux-src-dir' ${linux_src_dir} + + check_directory ${linux_source} "" "usage" + linux_src_dir="${linux_source}" +else + linux_repo=${linux_repo:-"https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git"} + linux_branch=${linux_branch:-"linux-5.3.y"} + linux_src_dir=${linux_src_dir:-"${top_build_dir}/$(git_get_repo_name ${linux_repo})"} +fi + +kernel_build_dir="${top_build_dir}/${target_arch}-kernel-build" +kernel_install_dir="${top_build_dir}/${target_arch}-kernel-install" + +case ${target_arch} in +arm64) + fixup_spec="${SCRIPTS_TOP}/targets/arm64/tx2/tx2-fixup.spec" + kernel_image="${kernel_install_dir}/boot/Image" + ;; +ppc*) + fixup_spec="${SCRIPTS_TOP}/targets/powerpc/powerpc-fixup.spec" + kernel_image="${kernel_install_dir}/boot/vmlinux.strip" + ;; +*) + fixup_spec="${SCRIPTS_TOP}/targets/generic-fixup.spec" + kernel_image="${kernel_install_dir}/boot/vmlinux.strip" + ;; +esac + +if [[ ${help_all} ]]; then + set +o xtrace + usage + echo "" >&2 + test_usage + trap - EXIT + exit 0 +fi + +if [[ ${usage} ]]; then + usage + trap - EXIT + exit 0 +fi + +if [[ ${TDD_BUILDER} ]]; then + if [[ ${step_enter} ]]; then + echo "${script_name}: ERROR: Already in tdd-builder." >&2 + exit 1 + fi +else + check_directory ${TDD_PROJECT_ROOT} "" "usage" + check_directory ${TDD_TEST_ROOT} "" "usage" + + ${DOCKER_TOP}/builder/build-builder.sh + + echo "${script_name}: Entering ${build_name} container..." >&2 + + if [[ ${step_enter} ]]; then + docker_cmd="/bin/bash" + else + docker_cmd="/tdd/scripts/tdd-run.sh ${parent_ops}" + fi + + ${SCRIPTS_TOP}/run-builder.sh \ + --verbose \ + --container-name="${build_name}" \ + --docker-args="\ + -e build_name \ + -v ${TDD_PROJECT_ROOT}:/tdd-project:ro \ + -e TDD_PROJECT_ROOT=/tdd-project \ + -v ${TDD_TEST_ROOT}:${container_work_dir}:rw,z \ + -e TDD_TEST_ROOT=${container_work_dir} \ + -w ${container_work_dir} \ + -e HISTFILE=${TDD_HISTFILE} \ + " \ + -- "${docker_cmd}" + + trap "on_exit 'container success.'" EXIT + exit 0 +fi + +check_rootfs_types +check_test_types +if [[ ${step_run_tests} ]]; then + check_machine "${test_machine}" +fi + +step_code="${step_build_kernel:-"0"}${step_build_bootstrap:-"0"}\ +${step_build_rootfs:-"0"}${step_build_tests:-"0"}${step_run_tests:-"0"}\ +${step_run_remote_tests:-"0"}" + +if [[ "${step_code}" == "000000" ]]; then + echo "${script_name}: ERROR: No step options provided." 
>&2 + usage + exit 1 +fi + +printenv + +if [[ ${step_build_bootstrap} || ${step_build_rootfs} ]]; then + ${sudo} true +fi + +mkdir -p ${top_build_dir} + +if [[ ${step_build_kernel} ]]; then + trap "on_exit '[build_kernel] failed.'" EXIT + + if [[ ${linux_source} ]]; then + build_kernel_from_src \ + "${linux_config}" \ + "${fixup_spec}" \ + "${kernel_platform_args}" \ + "${linux_src_dir}" \ + "${kernel_build_dir}" \ + "${kernel_install_dir}" + else + build_kernel_from_repo \ + "${linux_repo}" \ + "${linux_branch}" \ + "${linux_config}" \ + "${fixup_spec}" \ + "${kernel_platform_args}" \ + "${linux_src_dir}" \ + "${kernel_build_dir}" \ + "${kernel_install_dir}" + fi +fi + +for rootfs_type in ${rootfs_types}; do + + bootstrap_prefix="${top_build_dir}/${target_arch}-${rootfs_type}" + bootstrap_dir="${bootstrap_prefix}.bootstrap" + + if [[ ${step_build_bootstrap} ]]; then + trap "on_exit '[build_bootstrap] failed.'" EXIT + build_bootstrap ${rootfs_type} ${bootstrap_dir} + fi + + for test_name in ${test_types}; do + trap "on_exit 'test loop failed.'" EXIT + + output_prefix="${bootstrap_prefix}-${test_name}" + image_dir=${output_prefix}.image + tests_dir=${output_prefix}.tests + results_dir=${output_prefix}.results + + echo "${script_name}: INFO: ${test_name} => ${output_prefix}" >&2 + + if [[ ${step_build_rootfs} ]]; then + trap "on_exit '[build_rootfs] failed.'" EXIT + build_rootfs ${rootfs_type} \ + ${test_name} \ + ${bootstrap_dir} \ + ${image_dir} \ + ${kernel_install_dir} + create_sysroot ${rootfs_type} ${image_dir}/rootfs \ + ${image_dir}/sysroot + #build_kernel_with_initrd ${linux_src_dir} \ + # ${kernel_build_dir} ${kernel_install_dir} \ + # ${image_dir} + fi + + if [[ ${step_build_tests} ]]; then + trap "on_exit '[build_tests] failed.'" EXIT + build_tests ${rootfs_type} ${test_name} ${tests_dir} \ + ${image_dir}/sysroot ${linux_src_dir} + fi + + if [[ ${step_run_tests} ]]; then + trap "on_exit '[run_tests] failed.'" EXIT + run_tests ${kernel_image} ${image_dir} ${tests_dir} \ + ${results_dir} + fi + done +done + +trap "on_exit 'Success.'" EXIT +exit 0 diff --git a/scripts/test-plugin/http-wrk/http-wrk.sh b/scripts/test-plugin/http-wrk/http-wrk.sh new file mode 100644 index 00000000..5fcabec2 --- /dev/null +++ b/scripts/test-plugin/http-wrk/http-wrk.sh @@ -0,0 +1,97 @@ +# wrk - HTTP benchmark test plug-in. + +SCRIPTS_TOP=${SCRIPTS_TOP:-"$(cd "${BASH_SOURCE%/*}/.." && pwd)"} + +source ${SCRIPTS_TOP}/lib/util.sh + +test_usage_http_wrk() { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo " ${BASH_SOURCE##*/} - wrk - a HTTP benchmarking tool." >&2 + echo " wrk is a modern HTTP benchmarking tool capable of generating significant" + echo " load when run on a single multi-core CPU. It combines a multithreaded" + echo " design with scalable event notification systems such as epoll and kqueue." 
+ echo " More Info:" >&2 + echo " https://github.com/wg/wrk/blob/master/README.md" >&2 + eval "${old_xtrace}" +} + +test_packages_http_wrk() { + local rootfs_type=${1} + local target_arch=${2} + + echo '' +} + +test_setup_http_wrk() { + local rootfs_type=${1} + local rootfs=${2} + + return +} + +test_build_http_wrk() { + local rootfs_type=${1} + local tests_dir=${2} + mkdir -p ${tests_dir} + tests_dir="$(cd ${tests_dir} && pwd)" + local sysroot="$(cd ${3} && pwd)" + local kernel_src_dir="$(cd ${4} && pwd)" + + local test_name='http-wrk' + local src_repo=${http_wrk_src_repo:-"https://github.com/wg/wrk.git"} + local repo_branch=${http_wrk_repo_branch:-"master"} + local src_dir="${tests_dir}/${test_name}-src" + local build_dir="${tests_dir}/${test_name}-build" + local archive_file="${tests_dir}/${test_name}.tar.gz" + local results_file="${tests_dir}/${test_name}-results.tar.gz" + + check_directory "${sysroot}" + + rm -rf ${build_dir} ${archive_file} ${results_file} + + git_checkout_safe ${src_dir} ${src_repo} ${repo_branch} + + mkdir -p ${build_dir} + rsync -av --delete --exclude='.git' ${src_dir}/ ${build_dir}/ + + pushd ${build_dir} + + if [[ "${host_arch}" != "${target_arch}" ]]; then + make_opts="CC=$(get_triple ${target_arch})-gcc" + fi + + export SYSROOT="${sysroot}" + export CPPFLAGS="-I${SYSROOT}/usr/include -I${SYSROOT}/include -I${SYSROOT}" + export LDFLAGS="-L${SYSROOT}/usr/lib -L${SYSROOT}/lib" + export DESTDIR="${build_dir}/install" + export SKIP_IDCHECK=1 + + echo "${FUNCNAME[0]}: TODO." >&2 + touch ${archive_file} + + popd + echo "${FUNCNAME[0]}: Done, success." >&2 +} + +test_run_http_wrk() { + local tests_dir="$(cd ${1} && pwd)" + local machine_type=${2} + local ssh_host=${3} + local -n _test_run_http_wrk__ssh_opts=${4} + local ssh_opts="${_test_run_sys_info__ssh_opts}" + + local test_name='http-wrk' + local archive_file="${tests_dir}/${test_name}.tar.gz" + local results_file="${tests_dir}/${test_name}-results.tar.gz" + local timeout=${http_wrk_timeout:-"5m"} + + echo "INSIDE @${BASH_SOURCE[0]}:${FUNCNAME[0]}@" + echo "ssh_opts = @${_test_run_http_wrk__ssh_opts}@" + + echo "${FUNCNAME[0]}: TODO." >&2 + touch ${results_file} + + echo "${FUNCNAME[0]}: Done, success." >&2 +} diff --git a/scripts/test-plugin/ilp32/generic-test.sh b/scripts/test-plugin/ilp32/generic-test.sh new file mode 100644 index 00000000..108bc2aa --- /dev/null +++ b/scripts/test-plugin/ilp32/generic-test.sh @@ -0,0 +1,179 @@ +#!/usr/bin/env sh +# generic test driver + +on_exit() { + local result=${1} + + tar -czvf "${RESULTS_FILE}" "${results_dir}" + + echo "GENERIC TEST RESULT: ilp32-${TEST_NAME}: Done: ${result}" >&2 +} + +print_sys_info() { + rootfs_type=$(grep -E '^ID=' /etc/os-release) + rootfs_type=${rootfs_type#ID=} + + set +x + { + echo '-----------------------------' + echo -n 'date: ' + date + echo -n 'uname: ' + uname -a + echo "test name: ${TEST_NAME}" + echo "rootfs_type: ${rootfs_type}" + echo '-----------------------------' + echo 'os-release:' + cat /etc/os-release + echo '-----------------------------' + echo 'env:' + env + echo '-----------------------------' + echo 'set:' + set + echo '-----------------------------' + } 2>&1 | tee -a "${log_file}" + set -x +} + +print_fs_info() { + local lib_info=${1} + + set +x + { + echo '-----------------------------' + echo 'ilp32-libraries info:' + cat "${lib_info}" + echo '-----------------------------' + echo 'manifest:' + find . 
-type f -exec ls -l {} \; + echo '-----------------------------' + } 2>&1 | tee -a "${log_file}" + set -x +} + +run_test_prog() { + local msg=${1} + local prog_path=${2} + local prog_name=${3} + + set +e + #set +x + { + echo "=============================" + file "${prog_path}/${prog_name}" + echo "-----------------------------" + echo "test ${prog_name} '${msg}': strace start" + strace "${prog_path}/${prog_name}" + echo "test ${prog_name} '${msg}': result = ${?}" + echo "-----------------------------" + echo "test ${prog_name} '${msg}': run start" + "${prog_path}/${prog_name}" + echo "test ${prog_name} '${msg}': result = ${?}" + echo "test ${prog_name} '${msg}': end" + echo "=============================" + } 2>&1 | tee -a "${log_file}" + #set -x + set -e +} + +run_test_prog_verbose() { + local msg=${1} + local prog_path=${2} + local prog_name=${3} + + set +e + #set +x + { + echo "=============================" + file "${prog_path}/${prog_name}" + echo "-----------------------------" + echo "test ${prog_name} '${msg}': verbose start" + + ls -l /opt/ilp32/lib64/ld-2.30.so + file /opt/ilp32/lib64/ld-2.30.so + /opt/ilp32/lib64/ld-2.30.so --list "${prog_path}/${prog_name}" + + LD_SHOW_AUXV=1 "${prog_path}/${prog_name}" + LD_TRACE_LOADED_OBJECTS=1 LD_VERBOSE=1 "${prog_path}/${prog_name}" + LD_DEBUG=libs "${prog_path}/${prog_name}" + + echo "test ${prog_name} '${msg}': verbose end" + echo "=============================" + } 2>&1 | tee -a "${log_file}" + #set -x + set -e +} + +install_tests() { + tar -C "${test_home}" -xf "/ilp32-${TEST_NAME}-tests.tar.gz" + + mkdir -p /opt/ilp32/ + cp -a "${test_home}/${TEST_NAME}/ilp32-libraries/opt/ilp32"/* /opt/ilp32/ +} + +#=============================================================================== +# program start +#=============================================================================== +TEST_NAME=${TEST_NAME:-"${1}"} + +export PS4='+ generic-test.sh (ilp32-${TEST_NAME}): ' +set -x + +trap "on_exit 'failed.'" EXIT +set -e + +test_home="/ilp32-${TEST_NAME}" +mkdir -p "${test_home}" +cd "${test_home}" + +results_dir="${test_home}/results" +mkdir -p "${results_dir}" + +log_file="${results_dir}/test.log" +rm -f "${log_file}" + +print_sys_info +install_tests +print_fs_info "${test_home}/${TEST_NAME}/ilp32-libraries/opt/ilp32/info.txt" + +which sh +ls -l $(which sh) + +test_progs=$(cat "${test_home}/${TEST_NAME}/test_manifest") + +orig_limit=$(ulimit -s) + +for prog in ${test_progs}; do + echo "Running '${prog}'." >&2 + + ulimit -s "${orig_limit}" + ulimit -s + run_test_prog "limited" "${test_home}/${TEST_NAME}" "${prog}" + #run_test_prog_verbose "limited" "${test_home}/${TEST_NAME}" "${prog}" + + #ulimit -s unlimited + #ulimit -s + #run_test_prog "unlimited" "${test_home}/${TEST_NAME}" "${prog}" +done + +ulimit -s "${orig_limit}" + +checks='Segmentation fault|Internal error' +checks_IFS='|' + +IFS="${checks_IFS}" +for check in ${checks}; do + if grep "${check}" "${log_file}"; then + echo "ilp32-${TEST_NAME}: ERROR: '${check}' detected." >&2 + check_failed=1 + fi +done +unset IFS + +if [ ${check_failed} ]; then + exit 1 +fi + +trap "on_exit 'Success.'" EXIT +exit 0 diff --git a/scripts/test-plugin/ilp32/ilp32.sh b/scripts/test-plugin/ilp32/ilp32.sh new file mode 100644 index 00000000..a7c0b31a --- /dev/null +++ b/scripts/test-plugin/ilp32/ilp32.sh @@ -0,0 +1,186 @@ +#!/usr/bin/env bash +# +# ILP32 hello world test plug-in. 
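+#
+# Plug-in entry points. Like the other plug-ins under scripts/test-plugin/,
+# this file defines a set of per-test functions that the TDD build and run
+# steps call for each test type (a sketch of the contract as it appears in
+# this tree, not a formal interface):
+#   test_usage_ilp32    - print a short description of the test.
+#   test_packages_ilp32 - list extra distro packages the rootfs needs.
+#   test_setup_ilp32    - prepare the rootfs (a no-op for this plug-in).
+#   test_build_ilp32    - cross-build the test archives into the tests dir.
+#   test_run_ilp32      - copy the archives to the target over ssh and run them.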
+ +test_usage_ilp32() { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo " ${BASH_SOURCE##*/} - Build and run ILP32 hello world program." >&2 + eval "${old_xtrace}" +} + +test_packages_ilp32() { + local rootfs_type=${1} + local target_arch=${2} + + case "${rootfs_type}-${target_arch}" in + alpine-*) + ;; + debian-*) + ;; + *) + ;; + esac + echo "" +} + +test_setup_ilp32() { + return +} + +ilp32_build_sub_test() { + local sub_test=${1} + + ${src_dir}/scripts/build-ilp32-test-program.sh \ + --build-top=${build_dir}/tests/${sub_test} \ + --src-top=${src_dir}/tests/${sub_test} \ + --prefix=${tool_prefix} + + tar -czf ${tests_dir}/ilp32-${sub_test}-tests.tar.gz \ + -C ${build_dir}/tests ${sub_test} \ + -C ${src_dir} docker scripts +} + +test_build_ilp32() { + local rootfs_type=${1} + local tests_dir=${2} + mkdir -p ${tests_dir} + tests_dir="$(cd ${tests_dir} && pwd)" + local sysroot="$(cd ${3} && pwd)" + local kernel_src_dir="$(cd ${4} && pwd)" + + local src_repo=${ilp32_src_repo:-"https://github.com/glevand/ilp32--builder.git"} + local repo_branch=${ilp32_repo_branch:-"master"} + local src_dir="${tests_dir}/ilp32-src" + local build_dir="${tests_dir}/ilp32-build" + local results_archive="${tests_dir}/ilp32-results.tar.gz" + local ilp32_libs_file="${tests_dir}/ilp32-libraries.tar.gz" + local tool_prefix="/opt/ilp32" + + rm -rf ${build_dir} ${results_archive} ${tests_dir}/ilp32*-tests.tar.gz + + # FIXME: For debug. + #src_repo="/tdd--test/ilp32--builder.git-copy" + + git_checkout_force ${src_dir} ${src_repo} ${repo_branch} + + mkdir -p ${build_dir} + pushd ${build_dir} + + # FIXME: For debug. + #force_toolup="--force" + #force_builder="--force" + #force_runner="--force" + + ${src_dir}/scripts/build-ilp32-docker-image.sh \ + --build-top=${build_dir}/toolchain \ + ${force_toolup} \ + --toolup + + ${src_dir}/scripts/build-ilp32-docker-image.sh \ + --build-top=${build_dir}/toolchain \ + ${force_builder} \ + --builder + + if [[ -d ${build_dir}/toolchain ]]; then + cp -vf --link ${build_dir}/toolchain/ilp32-toolchain-*.tar.gz ${tests_dir}/ + # FIXME: Need this??? + cp -vf --link ${build_dir}/toolchain/ilp32-libraries-*.tar.gz ${tests_dir}/ + else + echo "${script_name}: INFO (${FUNCNAME[0]}): No toolchain archives found." >&2 + fi + + if [[ ${host_arch} == ${target_arch} ]]; then + ${src_dir}/scripts/build-ilp32-docker-image.sh \ + --build-top=${build_dir}/toolchain \ + ${force_runner} \ + --runner + fi + + ilp32_build_sub_test "hello-world" + ilp32_build_sub_test "vdso-tests" + ilp32_build_sub_test "gcc-tests" + + #tar -czf ${tests_dir}/ilp32-spec-cpu-tests.tar.gz \ + # -C ${src_dir}/tests lib spec-cpu \ + # -C ${??} cpu2017-src + + popd + echo "${FUNCNAME[0]}: Done, success." 
>&2 +} + +ilp32_run_sub_test() { + local sub_test=${1} + local test_driver=${2} + + local tests_archive="${tests_dir}/ilp32-${sub_test}-tests.tar.gz" + local results_archive="${tests_dir}/ilp32-${sub_test}-results.tar.gz" + local remote_results_archive="/ilp32-${sub_test}-results.tar.gz" + + rm -rf ${results_archive} + + scp ${ssh_opts} ${tests_archive} ${ssh_host}:/ + scp ${ssh_opts} ${TEST_TOP}/${test_driver} ${ssh_host}:/ + ssh ${ssh_opts} ${ssh_host} chmod +x /${test_driver} + ssh ${ssh_opts} ${ssh_host} "TEST_NAME=${sub_test} sh -c 'ls -l / && env'" + + set +e + timeout ${timeout} ssh ${ssh_opts} ${ssh_host} \ + "TEST_NAME=${sub_test} RESULTS_FILE=${remote_results_archive} sh -c '/${test_driver}'" + result=${?} + set -e + + if [[ ${result} -eq 124 ]]; then + echo "${FUNCNAME[0]}: Done, ilp32-${sub_test} failed: timeout." >&2 + elif [[ ${result} -ne 0 ]]; then + echo "${FUNCNAME[0]}: Done, ilp32-${sub_test} failed: '${result}'." >&2 + else + echo "${FUNCNAME[0]}: Done, ilp32-${sub_test} success." >&2 + fi + + scp ${ssh_opts} ${ssh_host}:${remote_results_archive} ${results_archive} +} + +ilp32_run_spec_cpu() { + local sub_test=spec-cpu + + local tests_archive="${tests_dir}/ilp32-${sub_test}-tests.tar.gz" + local results_archive="${tests_dir}/ilp32-${sub_test}-results.tar.gz" + local remote_results_archive="/ilp32-${sub_test}-results.tar.gz" + + rm -rf ${results_archive} + + scp ${ssh_opts} ${tests_archive} ${ssh_host}:/ +} + +test_run_ilp32() { + local tests_dir="$(cd ${1} && pwd)" + local machine_type=${2} + local ssh_host=${3} + local -n _test_run_ilp32__ssh_opts=${4} + local ssh_opts="${_test_run_ilp32__ssh_opts}" + + local src_repo=${ilp32_src_repo:-"https://github.com/glevand/ilp32--builder.git"} + local repo_branch=${ilp32_repo_branch:-"master"} + local src_dir="${tests_dir}/ilp32-src" + local build_dir="${tests_dir}/ilp32-build" + local timeout=${ilp32_timeout:-"5m"} + + echo "ssh_opts = @${ssh_opts}@" + + set -x + + for ((i = 0; i < 1; i++)); do + ilp32_run_sub_test "hello-world" "generic-test.sh" + ilp32_run_sub_test "vdso-tests" "generic-test.sh" + ilp32_run_sub_test "gcc-tests" "generic-test.sh" + #ilp32_run_sub_test "spec-cpu" "spec-cpu-test.sh" + done + +} + +SCRIPTS_TOP=${SCRIPTS_TOP:-"$(cd "${BASH_SOURCE%/*}/.." 
&& pwd)"} +source ${SCRIPTS_TOP}/lib/util.sh + +TEST_TOP=${TEST_TOP:-"$(cd "${BASH_SOURCE%/*}" && pwd)"} diff --git a/scripts/test-plugin/ilp32/spec-cpu-test.sh b/scripts/test-plugin/ilp32/spec-cpu-test.sh new file mode 100644 index 00000000..3b59cc33 --- /dev/null +++ b/scripts/test-plugin/ilp32/spec-cpu-test.sh @@ -0,0 +1,79 @@ +#!/usr/bin/env sh +# +# spec-cpu test driver + +on_exit() { + local result=${1} + + tar -czvf ${RESULTS_FILE} ${results_dir} + + echo "ilp32-${TEST_NAME}: Done: ${result}" >&2 +} + +script_name="${0##*/}" +TEST_NAME=${TEST_NAME:-"${1}"} + +export PS4='+ ilp32-${TEST_NAME}: ' +set -x + +trap "on_exit 'failed.'" EXIT +set -e + +test_home="/ilp32-${TEST_NAME}" +mkdir -p ${test_home} +cd ${test_home} + +results_dir=${test_home}/results +mkdir -p ${results_dir} + +log_file=${results_dir}/test.log +rm -f ${log_file} + +rootfs_type=$(egrep '^ID=' /etc/os-release) +rootfs_type=${rootfs_type#ID=} + +{ + echo '-----------------------------' + echo -n 'date: ' + date + echo -n 'uname: ' + uname -a + echo "test name: ${TEST_NAME}" + echo "rootfs_type: ${rootfs_type}" + echo '-----------------------------' + echo 'os-release:' + cat /etc/os-release + echo '-----------------------------' + echo 'env:' + env + echo '-----------------------------' + echo 'set:' + set +} | tee -a ${log_file} + +tar -C ${test_home} -xf /ilp32-${TEST_NAME}-tests.tar.gz +mkdir -p /opt/ilp32/ +cp -a ${test_home}/${TEST_NAME}/ilp32-libraries/opt/ilp32/* /opt/ilp32/ + +{ + echo '-----------------------------' + echo 'ilp32-libraries info:' + cat ${test_home}/${TEST_NAME}/ilp32-libraries/opt/ilp32/info.txt + echo '-----------------------------' + echo 'manifest:' + find . -type f -ls + echo '-----------------------------' +} | tee -a ${log_file} + +set +e +{ + echo 'test results:' + echo "${test_home}/${TEST_NAME}: TODO" +} | tee -a ${log_file} + +result=${?} + +set -e + +trap "on_exit 'Success.'" EXIT +exit 0 diff --git a/scripts/test-plugin/ltp/ltp.sh b/scripts/test-plugin/ltp/ltp.sh new file mode 100644 index 00000000..3b326adb --- /dev/null +++ b/scripts/test-plugin/ltp/ltp.sh @@ -0,0 +1,207 @@ +# LTP test plug-in. + +SCRIPTS_TOP=${SCRIPTS_TOP:-"$(cd "${BASH_SOURCE%/*}/.." && pwd)"} + +source ${SCRIPTS_TOP}/lib/util.sh +source ${SCRIPTS_TOP}/lib/chroot.sh + +test_usage_ltp() { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo " ${BASH_SOURCE##*/} - Linux Kernel Selftests." >&2 + echo " The LTP testsuite contains a collection of tools for testing the Linux kernel" + echo " and related features. Our goal is to improve the Linux kernel and system" + echo " libraries by bringing test automation to the testing effort." + echo " More Info:" >&2 + echo " https://github.com/linux-test-project/ltp/blob/master/README.md" >&2 + eval "${old_xtrace}" +} + +test_packages_ltp() { + local rootfs_type=${1} + local target_arch=${2} + + case "${rootfs_type}" in + alpine) + # FIXME: Error relocating /root/ltp-test/opt/ltp/bin/ltp-pan: __sprintf_chk: symbol not found + echo "${FUNCNAME[0]}: TODO: Need to setup build wih alpine's musl glibc." 
>&2 + exit 1 + echo 'libaio-dev' + ;; + debian) + echo 'libaio-dev libnuma-dev' + ;; + *) + echo "${FUNCNAME[0]}: ERROR: Unknown rootfs type: '${rootfs_type}'" >&2 + exit 1 + ;; + esac +} + +test_setup_ltp() { + local rootfs_type=${1} + local rootfs=${2} + + case "${rootfs_type}" in + alpine) + enter_chroot ${rootfs} " + set -e + apk add numactl-dev --repository http://dl-3.alpinelinux.org/alpine/edge/main/ --allow-untrusted + " + ;; + debian) + ;; + *) + echo "${FUNCNAME[0]}: ERROR: Unknown rootfs type: '${rootfs_type}'" >&2 + exit 1 + ;; + esac +} + +test_build_ltp() { + local rootfs_type=${1} + local tests_dir=${2} + mkdir -p ${tests_dir} + tests_dir="$(cd ${tests_dir} && pwd)" + local sysroot="$(cd ${3} && pwd)" + local kernel_src_dir="$(cd ${4} && pwd)" + + local test_name='ltp' + local src_repo=${ltp_src_repo:-"https://github.com/linux-test-project/ltp.git"} + local repo_branch=${ltp_repo_branch:-"master"} + local src_dir="${tests_dir}/${test_name}-src" + local build_dir="${tests_dir}/${test_name}-build" + local archive_file="${tests_dir}/${test_name}.tar.gz" + local results_file="${tests_dir}/${test_name}-results.tar.gz" + + check_directory "${sysroot}" + rm -rf ${build_dir} ${archive_file} ${results_file} + + git_checkout_safe ${src_dir} ${src_repo} ${repo_branch} + + mkdir -p ${build_dir} + rsync -av --delete --exclude='.git' ${src_dir}/ ${build_dir}/ + + pushd ${build_dir} + + if [[ "${host_arch}" != "${target_arch}" ]]; then + local triple="$(get_triple ${target_arch})" + make_opts="--host=${triple} CC=${triple}-gcc" + fi + + export SYSROOT="${sysroot}" + export CPPFLAGS="-I${SYSROOT}/usr/include -I${SYSROOT}/include -I${SYSROOT}" + export LDFLAGS="-L${SYSROOT}/usr/lib -L${SYSROOT}/lib" + export DESTDIR="${build_dir}/install" + export SKIP_IDCHECK=1 + + make autotools + ./configure \ + SYSROOT="${sysroot}" \ + CPPFLAGS="-I${SYSROOT}/usr/include -I${SYSROOT}/include -I${SYSROOT}" \ + LDFLAGS="-L${SYSROOT}/usr/lib -L${SYSROOT}/lib" \ + DESTDIR="${build_dir}/install" \ + ${make_opts} + (unset TARGET_ARCH; make) + make DESTDIR="${build_dir}/install" install + + file ${build_dir}/install/opt/ltp/bin/ltp-pan + tar -C ${DESTDIR} -czf ${archive_file} . + + popd + echo "${FUNCNAME[0]}: Done, success." 
>&2 +} + +test_run_ltp() { + local tests_dir="$(cd ${1} && pwd)" + local machine_type=${2} + local ssh_host=${3} + local -n _test_run_ltp__ssh_opts=${4} + local ssh_opts="${_test_run_ltp__ssh_opts}" + + local test_name='ltp' + local archive_file="${tests_dir}/${test_name}.tar.gz" + local results_file="${tests_dir}/${test_name}-results.tar.gz" + local timeout=${ltp_timeout:-"5m"} + + echo "INSIDE @${BASH_SOURCE[0]}:${FUNCNAME[0]}@" + echo "ssh_opts = @${ssh_opts}@" + + set -x + rm -rf ${results_file} + + case "${machine_type}" in + qemu) + LTP_DEV='/dev/vda' + LTP_RUN_OPTS='-b /dev/vdb -z /dev/vdc' + ;; + remote) + ;; + esac + + #echo "${FUNCNAME[0]}: tests_dir = @${tests_dir}@" + #echo "${FUNCNAME[0]}: machine_type = @${machine_type}@" + #echo "${FUNCNAME[0]}: ssh_host = @${ssh_host}@" + #echo "${FUNCNAME[0]}: ssh_opts = @${ssh_opts}@" + echo "${FUNCNAME[0]}: archive_file = @${archive_file}@" + echo "${FUNCNAME[0]}: LTP_RUN_OPTS = @${LTP_RUN_OPTS}@" + + scp ${ssh_opts} ${archive_file} ${ssh_host}:ltp.tar.gz + + set +e + timeout ${timeout} ssh ${ssh_opts} ${ssh_host} \ + LTP_DEV="'${LTP_DEV}'" \ + LTP_RUN_OPTS="'${LTP_RUN_OPTS}'" 'sh -s' <<'EOF' +export PS4='+ ltp-test-script:${LINENO}: ' +set -ex + +cat /proc/partitions +printenv + +rootfs_type=$(egrep '^ID=' /etc/os-release) +rootfs_type=${rootfs_type#ID=} + +## Exclude sshd from oom-killer. +#sshd_pid=$(systemctl show --value -p MainPID ssh) +#if [[ ${sshd_pid} -eq 0 ]]; then +# exit 1 +#fi +#echo -17 > /proc/${sshd_pid}/oom_adj + +mkfs.ext4 ${LTP_DEV} +mkdir -p ltp-test +mount ${LTP_DEV} ltp-test + +tar -C ltp-test -xf ltp.tar.gz +cd ./ltp-test/opt/ltp + +echo -e "oom01\noom02\noom03\noom04\noom05" > skip-tests +cat skip-tests + +cat ./Version + +set +e +ls -l ./bin/ltp-pan +ldd ./bin/ltp-pan + +./runltp -S skip-tests ${LTP_RUN_OPTS} + +result=${?} +set -e + +tar -czvf ${HOME}/ltp-results.tar.gz ./output ./results +EOF + result=${?} + set -e + + if [[ ${result} -eq 124 ]]; then + echo "${FUNCNAME[0]}: Done, failed: timeout." >&2 + elif [[ ${result} -ne 0 ]]; then + scp ${ssh_opts} ${ssh_host}:ltp-results.tar.gz ${results_file} || : + echo "${FUNCNAME[0]}: Done, failed: '${result}'." >&2 + else + scp ${ssh_opts} ${ssh_host}:ltp-results.tar.gz ${results_file} + echo "${FUNCNAME[0]}: Done, success." >&2 + fi +} diff --git a/scripts/test-plugin/phoronix/phoronix.sh b/scripts/test-plugin/phoronix/phoronix.sh new file mode 100644 index 00000000..9f54ad4a --- /dev/null +++ b/scripts/test-plugin/phoronix/phoronix.sh @@ -0,0 +1,146 @@ +# phoronix test plug-in. + +SCRIPTS_TOP=${SCRIPTS_TOP:-"$(cd "${BASH_SOURCE%/*}/.." && pwd)"} + +source ${SCRIPTS_TOP}/lib/util.sh +source ${SCRIPTS_TOP}/lib/chroot.sh + +test_usage_phoronix() { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo " ${BASH_SOURCE##*/} - Phoronix Test Suite." >&2 + echo " The Phoronix Test Suite itself is an open-source framework for conducting automated" + echo " tests along with reporting of test results, detection of installed system" + echo " software/hardware, and other features." 
+ echo " More Info:" >&2 + echo " https://github.com/phoronix-test-suite/phoronix-test-suite/blob/master/README.md" >&2 + eval "${old_xtrace}" +} + +test_packages_phoronix() { + local rootfs_type=${1} + local target_arch=${2} + + case "${rootfs_type}" in + alpine) + echo 'php-cli' + ;; + debian) + echo 'php-cli' + ;; + *) + echo "${FUNCNAME[0]}: ERROR: Unknown rootfs type: '${rootfs_type}'" >&2 + exit 1 + ;; + esac +} + +test_setup_phoronix() { + local rootfs_type=${1} + local rootfs=${2} + + case "${rootfs_type}" in + alpine) + ;; + debian) + ;; + *) + echo "${FUNCNAME[0]}: ERROR: Unknown rootfs type: '${rootfs_type}'" >&2 + exit 1 + ;; + esac +} + +test_build_phoronix() { + local rootfs_type=${1} + local tests_dir=${2} + local sysroot=${3} + local kernel_src_dir=${4} + + mkdir -p ${tests_dir} + tests_dir="$(cd ${tests_dir} && pwd)" + sysroot="$(cd ${sysroot} && pwd)" + kernel_src_dir="$(cd ${kernel_src_dir} && pwd)" + + local test_name='phoronix' + local src_tar_url="https://phoronix-test-suite.com/releases/phoronix-test-suite-8.8.1.tar.gz" + + local archive_file="${tests_dir}/${test_name}.tar.gz" + local results_file="${tests_dir}/${test_name}-results.tar.gz" + + check_directory "${sysroot}" + rm -rf ${archive_file} ${results_file} + + curl --silent --show-error --location ${src_tar_url} > ${archive_file} + + popd + echo "${FUNCNAME[0]}: Done, success." >&2 +} + +test_run_phoronix() { + local tests_dir=${1} + local machine_type=${2} + local ssh_host=${3} + local -n _test_run_phoronix__ssh_opts=${4} + local ssh_opts="${_test_run_phoronix__ssh_opts}" + + tests_dir="$(cd ${tests_dir} && pwd)" + + local test_name='phoronix' + local archive_file="${tests_dir}/${test_name}.tar.gz" + local results_file="${tests_dir}/${test_name}-results.tar.gz" + local timeout=${phoronix_timeout:-"5m"} + + echo "INSIDE @${BASH_SOURCE[0]}:${FUNCNAME[0]}@" + echo "ssh_opts = @${ssh_opts}@" + + set -x + rm -rf ${results_file} + + case "${machine_type}" in + qemu) + phoronix_RUN_OPTS='-b /dev/vda -z /dev/vdb' + ;; + remote) + ;; + esac + + scp ${ssh_opts} ${archive_file} ${ssh_host}:phoronix.tar.gz + + set +e + timeout ${timeout} ssh ${ssh_opts} ${ssh_host} phoronix_RUN_OPTS="'${phoronix_RUN_OPTS}'" 'sh -s' <<'EOF' +export PS4='+phoronix-test-script:${LINENO}: ' +set -ex + +cat /proc/partitions +printenv + +rootfs_type=$(egrep '^ID=' /etc/os-release) +rootfs_type=${rootfs_type#ID=} + +mkdir -p phoronix-test +tar -C phoronix-test -xf phoronix.tar.gz +cd ./phoronix-test/ + +ls -lah ./bin/phoronix-pan +echo "skippping tests for debug!!!" +mkdir -p ./results + +set -e + +tar -czvf ${HOME}/phoronix-results.tar.gz ./results +EOF + result=${?} + set -e + + if [[ ${result} -eq 124 ]]; then + echo "${FUNCNAME[0]}: Done, failed: timeout." >&2 + elif [[ ${result} -ne 0 ]]; then + scp ${ssh_opts} ${ssh_host}:phoronix-results.tar.gz ${results_file} || : + echo "${FUNCNAME[0]}: Done, failed: '${result}'." >&2 + else + scp ${ssh_opts} ${ssh_host}:phoronix-results.tar.gz ${results_file} + echo "${FUNCNAME[0]}: Done, success." 
>&2 + fi +} diff --git a/scripts/test-plugin/sys-info/sys-info-test.sh b/scripts/test-plugin/sys-info/sys-info-test.sh new file mode 100644 index 00000000..23b025e4 --- /dev/null +++ b/scripts/test-plugin/sys-info/sys-info-test.sh @@ -0,0 +1,76 @@ +#!/usr/bin/env sh +# generic test driver + +on_exit() { + local result=${1} + + set -x + tar -czvf ${RESULTS_FILE} ${results_dir} + echo "${TEST_NAME}: Done: ${result}" >&2 +} + +print_sys_info() { + local log_file=${1} + + local rootfs_type=$(egrep '^ID=' /etc/os-release) + local rootfs_type=${rootfs_type#ID=} + + set +x + { + echo '-----------------------------' + echo -n 'date: ' + date + echo -n 'uname: ' + uname -a + echo "test name: ${TEST_NAME}" + echo "rootfs_type: ${rootfs_type}" + echo '-----------------------------' + echo 'os-release:' + cat /etc/os-release + echo '-----------------------------' + echo 'env:' + env + echo '-----------------------------' + echo 'set:' + set + echo '-----------------------------' + echo 'id:' + id + echo '-----------------------------' + echo '/proc/partitions:' + cat /proc/partitions + echo '-----------------------------' + echo 'dmidecode:' + if [ -f /usr/sbin/dmidecode ]; then + /usr/sbin/dmidecode + else + echo '/usr/sbin/dmidecode not found.' + fi + echo '-----------------------------' + } 2>&1 | tee -a "${log_file}" + set -x +} + +#=============================================================================== +# program start +#=============================================================================== +set -x +export PS4='+ sys-info-test.sh: ${LINENO:-"?"}: ' + +TEST_NAME=${TEST_NAME:-"${1}"} + +trap "on_exit 'failed.'" EXIT +set -e + +test_home="/${TEST_NAME}" +mkdir -p "${test_home}" +cd "${test_home}" + +results_dir=${test_home}/results +rm -rf "${results_dir}" +mkdir -p "${results_dir}" + +print_sys_info "${results_dir}/test.log" + +trap "on_exit 'Success.'" EXIT +exit 0 diff --git a/scripts/test-plugin/sys-info/sys-info.sh b/scripts/test-plugin/sys-info/sys-info.sh new file mode 100644 index 00000000..7d4cf491 --- /dev/null +++ b/scripts/test-plugin/sys-info/sys-info.sh @@ -0,0 +1,82 @@ +# System info test plug-in. + +test_usage_sys_info() { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo " ${BASH_SOURCE##*/} - Collect system information." >&2 + eval "${old_xtrace}" +} + +test_packages_sys_info() { + local rootfs_type=${1} + local target_arch=${2} + + case "${rootfs_type}-${target_arch}" in + alpine-arm64) + echo "dmidecode" + ;; + debian-arm64) + echo "dmidecode" + ;; + *) + ;; + esac +} + +test_setup_sys_info() { + local rootfs_type=${1} + local rootfs=${2} + + return +} + +test_build_sys_info() { + local rootfs_type=${1} + local tests_dir=${2} + mkdir -p ${tests_dir} + tests_dir="$(cd ${tests_dir} && pwd)" + local sysroot="$(cd ${3} && pwd)" + local kernel_src_dir="$(cd ${4} && pwd)" + + echo "${FUNCNAME[0]}: Done, success." 
>&2 +} + +test_run_sys_info() { + local tests_dir="$(cd ${1} && pwd)" + local machine_type=${2} + local ssh_host=${3} + local -n _test_run_sys_info__ssh_opts=${4} + local ssh_opts="${_test_run_sys_info__ssh_opts}" + + local test_driver="sys-info-test.sh" + local results_archive="${tests_dir}/sys-info-results.tar.gz" + local remote_results_archive="/sys-info-results.tar.gz" + local timeout=${sys_info_timeout:-"5m"} + + rm -rf ${results_archive} + + scp ${ssh_opts} ${TEST_TOP}/${test_driver} ${ssh_host}:/ + ssh ${ssh_opts} ${ssh_host} chmod +x /${test_driver} + + set +e + timeout ${timeout} ssh ${ssh_opts} ${ssh_host} \ + "TEST_NAME='sys-info' RESULTS_FILE='${remote_results_archive}' sh -c '/${test_driver}'" + result=${?} + set -e + + if [[ ${result} -eq 124 ]]; then + echo "${FUNCNAME[0]}: Done, sys-info failed: timeout." >&2 + elif [[ ${result} -ne 0 ]]; then + echo "${FUNCNAME[0]}: Done, sys-info failed: '${result}'." >&2 + else + echo "${FUNCNAME[0]}: Done, sys-info success." >&2 + fi + + scp ${ssh_opts} ${ssh_host}:${remote_results_archive} ${results_archive} +} + +SCRIPTS_TOP=${SCRIPTS_TOP:-"$(cd "${BASH_SOURCE%/*}/.." && pwd)"} +source ${SCRIPTS_TOP}/lib/util.sh + +TEST_TOP=${TEST_TOP:-"$(cd "${BASH_SOURCE%/*}" && pwd)"} diff --git a/scripts/test-plugin/test-plugin.sh b/scripts/test-plugin/test-plugin.sh new file mode 100644 index 00000000..30d51076 --- /dev/null +++ b/scripts/test-plugin/test-plugin.sh @@ -0,0 +1,12 @@ +# TDD test plug-in. + +known_test_types=" + http-wrk + ilp32 + kselftest + lmbench + ltp + phoronix + sys-info + unixbench +" diff --git a/scripts/test-plugin/unixbench/unixbench.sh b/scripts/test-plugin/unixbench/unixbench.sh new file mode 100644 index 00000000..ca5e242a --- /dev/null +++ b/scripts/test-plugin/unixbench/unixbench.sh @@ -0,0 +1,141 @@ +# UnixBench test plug-in. + +SCRIPTS_TOP=${SCRIPTS_TOP:-"$(cd "${BASH_SOURCE%/*}/.." && pwd)"} + +source ${SCRIPTS_TOP}/lib/util.sh + +test_usage_unixbench() { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo " ${BASH_SOURCE##*/} - UnixBench - The original BYTE UNIX benchmark suite." >&2 + echo " The purpose of UnixBench is to provide a basic indicator of the performance" + echo " of a Unix-like system; hence, multiple tests are used to test various" + echo " aspects of the system's performance. These test results are then compared" + echo " to the scores from a baseline system to produce an index value, which is" + echo " generally easier to handle than the raw scores. The entire set of index" + echo " values is then combined to make an overall index for the system." 
+ echo " More Info:" >&2 + echo " https://github.com/kdlucas/byte-unixbench/blob/master/README.md" >&2 + eval "${old_xtrace}" +} + +test_packages_unixbench() { + local rootfs_type=${1} + local target_arch=${2} + + case "${rootfs_type}" in + alpine) + echo 'make perl' + ;; + debian) + echo 'make libperl-dev' + ;; + *) + echo "${FUNCNAME[0]}: ERROR: Unknown rootfs type: '${rootfs_type}'" >&2 + exit 1 + ;; + esac +} + +test_setup_unixbench() { + local rootfs_type=${1} + local rootfs=${2} + + return +} + +test_build_unixbench() { + local rootfs_type=${1} + local tests_dir=${2} + mkdir -p ${tests_dir} + tests_dir="$(cd ${tests_dir} && pwd)" + local sysroot="$(cd ${3} && pwd)" + local kernel_src_dir="$(cd ${4} && pwd)" + + local test_name='unixbench' + local src_repo=${unixbench_src_repo:-"https://github.com/kdlucas/byte-unixbench.git"} + local repo_branch=${unixbench_repo_branch:-"master"} + local src_dir="${tests_dir}/${test_name}-src" + local build_dir="${tests_dir}/${test_name}-build" + local archive_file="${tests_dir}/${test_name}.tar.gz" + local results_file="${tests_dir}/${test_name}-results.tar.gz" + + check_directory "${sysroot}" + rm -rf ${build_dir} ${archive_file} + + git_checkout_safe ${src_dir} ${src_repo} ${repo_branch} + + mkdir -p ${build_dir} + rsync -av --delete --exclude='.git' ${src_dir}/ ${build_dir}/ + + if [[ "${host_arch}" != "${target_arch}" ]]; then + make_opts="CC=$(get_triple ${target_arch})-gcc" + fi + + export SYSROOT="$(pwd)/${sysroot}" + export CPPFLAGS="-I${SYSROOT}/usr/include -I${SYSROOT}/include -I${SYSROOT}" + export LDFLAGS="-L${SYSROOT}/usr/lib -L${SYSROOT}/lib" + + make -C ${build_dir}/UnixBench ${make_opts} UB_GCC_OPTIONS='-O3 -ffast-math' + + tar -C ${build_dir} -czf ${archive_file} UnixBench + echo "${FUNCNAME[0]}: Done, success." >&2 +} + +test_run_unixbench() { + local tests_dir="$(cd ${1} && pwd)" + local machine_type=${2} + local ssh_host=${3} + local -n _test_run_unixbench__ssh_opts=${4} + local ssh_opts="${_test_run_unixbench__ssh_opts}" + + local test_name='unixbench' + local archive_file="${tests_dir}/${test_name}.tar.gz" + local results_file="${tests_dir}/${test_name}-results.tar.gz" + local timeout=${unixbench_timeout:-"5m"} + + echo "INSIDE @${BASH_SOURCE[0]}:${FUNCNAME[0]}@" + echo "ssh_opts = @${ssh_opts}@" + + set -x + rm -rf ${results_file} + + scp ${ssh_opts} ${archive_file} ${ssh_host}:unixbench.tar.gz + + set +e + timeout ${timeout} ssh ${ssh_opts} ${ssh_host} 'sh -s' <<'EOF' +export PS4='+unixbench-test-script:${LINENO}: ' +set -ex + +cat /proc/partitions +printenv + +rootfs_type=$(egrep '^ID=' /etc/os-release) +rootfs_type=${rootfs_type#ID=} + +mkdir -p unixbench-test +tar -C unixbench-test -xf unixbench.tar.gz +cd ./unixbench-test/UnixBench + +set +e +#./Run +echo "skippping tests for debug!!!" +result=${?} +set -e + +tar -czvf ${HOME}/unixbench-results.tar.gz ./results +EOF + result=${?} + set -e + + if [[ ${result} -eq 124 ]]; then + echo "${FUNCNAME[0]}: Done, failed: timeout." >&2 + elif [[ ${result} -ne 0 ]]; then + scp ${ssh_opts} ${ssh_host}:unixbench-results.tar.gz ${results_file} || : + echo "${FUNCNAME[0]}: Done, failed: '${result}'." >&2 + else + scp ${ssh_opts} ${ssh_host}:unixbench-results.tar.gz ${results_file} + echo "${FUNCNAME[0]}: Done, success." 
>&2 + fi +} diff --git a/scripts/tftp-upload.sh b/scripts/tftp-upload.sh new file mode 100755 index 00000000..7dc6183e --- /dev/null +++ b/scripts/tftp-upload.sh @@ -0,0 +1,169 @@ +#!/usr/bin/env bash + +set -e + +script_name="${0##*/}" + +SCRIPTS_TOP=${SCRIPTS_TOP:-"$( cd "${BASH_SOURCE%/*}" && pwd )"} + +source ${SCRIPTS_TOP}/lib/util.sh + +usage () { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "${script_name} - Upload files to tftp server." >&2 + echo "Usage: ${script_name} [flags]" >&2 + echo "Option flags:" >&2 + echo " -h --help - Show this help and exit." >&2 + echo " -i --initrd - Initrd image. Default: '${initrd}'." >&2 + echo " -k --kernel - Kernel image. Default: '${kernel}'." >&2 + echo " -n --no-known-hosts - Do not setup known_hosts file. Default: '${no_known_hosts}'." >&2 + echo " -s --ssh-login-key - SSH login private key file. Default: '${ssh_login_key}'." >&2 + echo " -v --verbose - Verbose execution." >&2 + echo " --tftp-triple - tftp triple. File name or 'user:server:root'. Default: '${tftp_triple}'." >&2 + echo " --tftp-dest - tftp destination directory relative to tftp-root. Default: '${tftp_dest}'." >&2 + eval "${old_xtrace}" +} + +short_opts="hi:k:ns:v" +long_opts="help,initrd:,kernel:,no-known-hosts,ssh-login-key:,tftp-dest:,tftp-triple:,verbose" + +opts=$(getopt --options ${short_opts} --long ${long_opts} -n "${script_name}" -- "$@") + +if [ $? != 0 ]; then + echo "${script_name}: ERROR: Internal getopt" >&2 + exit 1 +fi + +eval set -- "${opts}" + +while true ; do + case "${1}" in + -h | --help) + usage=1 + shift + ;; + -i | --initrd) + initrd="${2}" + shift 2 + ;; + -k | --kernel) + kernel="${2}" + shift 2 + ;; + -n | --no-known-hosts) + no_known_hosts=1 + shift + ;; + -s | --ssh-login-key) + ssh_login_key="${2}" + shift 2 + ;; + -t | --tftp-triple) + tftp_triple="${2}" + shift 2 + ;; + -t | --tftp-dest) + tftp_dest="${2}" + shift 2 + ;; + -v | --verbose) + set -x + verbose=1 + shift + ;; + --) + shift + break + ;; + *) + echo "${script_name}: ERROR: Internal opts: '${@}'" >&2 + exit 1 + ;; + esac +done + +tftp_kernel="tdd-kernel" +tftp_initrd="tdd-initrd" +tftp_login_key="tdd-login-key" + +if [[ ${usage} ]]; then + usage + exit 0 +fi + +if [[ -f "${tftp_triple}" ]]; then + tftp_triple=$(cat ${tftp_triple}) +fi + +if [[ ${tftp_triple} ]]; then + echo "${script_name}: INFO: tftp triple: '${tftp_triple}'" >&2 + + tftp_user="$(echo ${tftp_triple} | cut -d ':' -f 1)" + tftp_server="$(echo ${tftp_triple} | cut -d ':' -f 2)" + tftp_root="$(echo ${tftp_triple} | cut -d ':' -f 3)" +else + tftp_user=${TDD_TFTP_USER:-"tdd-jenkins"} + tftp_server=${TDD_TFTP_SERVER:-"tdd-tftp"} + tftp_root=${TDD_TFTP_ROOT:-"/var/tftproot"} +fi + +check_opt 'tftp-dest' ${tftp_dest} + +echo "${script_name}: INFO: tftp user: '${tftp_user}'" >&2 +echo "${script_name}: INFO: tftp server: '${tftp_server}'" >&2 +echo "${script_name}: INFO: tftp root: '${tftp_root}'" >&2 +echo "${script_name}: INFO: tftp dest: '${tftp_dest}'" >&2 + +check_opt 'kernel' ${kernel} +check_file "${kernel}" + +check_opt 'initrd' ${initrd} +check_file "${initrd}" + +check_opt 'ssh-login-key' ${ssh_login_key} +check_file "${ssh_login_key}" + +on_exit() { + local result=${1} + + if [[ ${tmp_dir} && -d ${tmp_dir} ]]; then + rm -rf ${tmp_dir} + fi + + echo "${script_name}: ${result}" >&2 +} + +trap "on_exit 'Done, failed.'" EXIT + +#if [[ ${no_known_hosts} ]]; then +# ssh_extra_args+="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" +#else +# if ! 
ssh-keygen -F ${tftp_server} &> /dev/null; then +# tmp_dir="$(mktemp --tmpdir --directory ${script_name}.XXXX)" +# known_hosts_file="${tmp_dir}/known_hosts" +# +# ssh-keyscan ${tftp_server} >> ${known_hosts_file} +# ssh_extra_args+="-o UserKnownHostsFile=${known_hosts_file}" +# fi +#fi + +ssh_extra_args+="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" + +if [[ ${verbose} ]]; then + ssh_extra_args+=" -v" + #ssh ${ssh_extra_args} ${tftp_user}@${tftp_server} ls -lh ${tftp_root}/${tftp_dest} +fi + +scp ${ssh_extra_args} ${initrd} ${tftp_user}@${tftp_server}:${tftp_root}/${tftp_dest}/${tftp_initrd} +scp ${ssh_extra_args} ${kernel} ${tftp_user}@${tftp_server}:${tftp_root}/${tftp_dest}/${tftp_kernel} +scp ${ssh_extra_args} ${ssh_login_key} ${tftp_user}@${tftp_server}:${tftp_root}/${tftp_dest}/${tftp_login_key} + +#if [[ ${verbose} ]]; then +# ssh ${ssh_extra_args} ${tftp_user}@${tftp_server} ls -lh ${tftp_root}/${tftp_dest} +#fi + +trap - EXIT + +on_exit 'Done, success.' diff --git a/scripts/upload-di.sh b/scripts/upload-di.sh new file mode 100755 index 00000000..11283de5 --- /dev/null +++ b/scripts/upload-di.sh @@ -0,0 +1,177 @@ +#!/usr/bin/env bash + +usage () { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "${script_name} - Upload Debian netboot installer to tftp server." >&2 + echo "Usage: ${script_name} [flags]" >&2 + echo "Option flags:" >&2 + echo " -c --config-file - Config file. Default: '${config_file}'." >&2 + echo " -h --help - Show this help and exit." >&2 + echo " -o --host - Target host. Default: '${host}'." >&2 + echo " -r --release - Debian release. Default: '${release}'." >&2 + echo " -s --tftp-server - TFTP server. Default: '${tftp_server}'." >&2 + echo " -t --type - Release type {$(clean_ws ${types})}." >&2 + echo " Default: '${type}'." >&2 + echo " -v --verbose - Verbose execution." >&2 + eval "${old_xtrace}" +} + +process_opts() { + local short_opts="c:hors:t:v" + local long_opts="config-file:,help,host:,release:,tftp-server:,type:,verbose" + + local opts + opts=$(getopt --options ${short_opts} --long ${long_opts} -n "${script_name}" -- "$@") + + eval set -- "${opts}" + + while true ; do + case "${1}" in + -c | --config-file) + config_file="${2}" + shift 2 + ;; + -h | --help) + usage=1 + shift + ;; + -o | --host) + host="${2}" + shift 2 + ;; + -r | --release) + release="${2}" + shift 2 + ;; + -s | --tftp-server) + tftp_server="${2}" + shift 2 + ;; + -t | --type) + type="${2}" + shift 2 + ;; + -v | --verbose) + set -x + verbose=1 + shift + ;; + --) + shift + break + ;; + *) + echo "${script_name}: ERROR: Internal opts: '${@}'" >&2 + exit 1 + ;; + esac + done +} + +on_exit() { + local result=${1} + + echo "${script_name}: Done: ${result}" >&2 +} + +#=============================================================================== +# program start +#=============================================================================== + +script_name="${0##*/}" +SCRIPTS_TOP=${SCRIPTS_TOP:-"$( cd "${BASH_SOURCE%/*}" && pwd )"} + +trap "on_exit 'failed.'" EXIT +set -e + +source ${SCRIPTS_TOP}/lib/util.sh + +process_opts "${@}" + +config_file="${config_file:-${SCRIPTS_TOP}/upload.conf}" + +check_file ${config_file} " --config-file" "usage" +source ${config_file} + +if [[ ! ${tftp_server} ]]; then + echo "${script_name}: ERROR: No tftp_server entry: '${config_file}'" >&2 + usage + exit 1 +fi + +if [[ ! 
${host} ]]; then + echo "${script_name}: ERROR: No host entry: '${config_file}'" >&2 + usage + exit 1 +fi + +types=" + buster + daily + sid +" + +type=${type:-"buster"} + +case "${type}" in +buster) + release="current" + files_url="http://ftp.nl.debian.org/debian/dists/buster/main/installer-arm64/${release}/images/netboot/debian-installer/arm64" + sums_url="http://ftp.nl.debian.org/debian/dists/buster/main/installer-arm64/${release}/images/" + ;; +daily) + release="daily" + files_url="https://d-i.debian.org/daily-images/arm64/${release}/netboot/debian-installer/arm64" + sums_url="https://d-i.debian.org/daily-images/arm64/${release}" + ;; +sid) + echo "${script_name}: ERROR: No sid support yet." >&2 + exit 1 + ;; +*) + echo "${script_name}: ERROR: Unknown type '${type}'" >&2 + usage + exit 1 + ;; +esac + +if [[ -n "${usage}" ]]; then + usage + exit 0 +fi + +ssh ${tftp_server} ls -l /var/tftproot/${host} + +ssh ${tftp_server} host=${host} files_url=${files_url} sums_url=${sums_url} 'bash -s' <<'EOF' + +set -e + +if [[ -f /var/tftproot/${host}/tdd-initrd \ + && -f /var/tftproot/${host}/tdd-kernel ]]; then + mv -f /var/tftproot/${host}/tdd-initrd /var/tftproot/${host}/tdd-initrd.old + mv -f /var/tftproot/${host}/tdd-kernel /var/tftproot/${host}/tdd-kernel.old +fi + +wget --no-verbose -O /var/tftproot/${host}/tdd-initrd ${files_url}/initrd.gz +wget --no-verbose -O /var/tftproot/${host}/tdd-kernel ${files_url}/linux +wget --no-verbose -O /tmp/di-sums ${sums_url}/MD5SUMS + +echo "--- initrd ---" +[[ -f /var/tftproot/${host}/tdd-initrd.old ]] && md5sum /var/tftproot/${host}/tdd-initrd.old +md5sum /var/tftproot/${host}/tdd-initrd +cat /tmp/di-sums | egrep 'netboot/debian-installer/arm64/initrd.gz' +echo "--- kernel ---" +[[ -f /var/tftproot/${host}/tdd-kernel.old ]] && md5sum /var/tftproot/${host}/tdd-kernel.old +md5sum /var/tftproot/${host}/tdd-kernel +cat /tmp/di-sums | egrep 'netboot/debian-installer/arm64/linux' +echo "---------" + +EOF + +echo "${script_name}: ${host} files ready on ${tftp_server}." >&2 + +trap "on_exit 'success.'" EXIT +exit 0 + diff --git a/scripts/upload-fedora-installer.sh b/scripts/upload-fedora-installer.sh new file mode 100644 index 00000000..d72c161f --- /dev/null +++ b/scripts/upload-fedora-installer.sh @@ -0,0 +1,198 @@ +#!/usr/bin/env bash + +usage () { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + echo "${script_name} - Upload Fedora netboot installer to tftp server." >&2 + echo "Usage: ${script_name} [flags]" >&2 + echo "Option flags:" >&2 + echo " -c --config-file - Config file. Default: '${config_file}'." >&2 + echo " -h --help - Show this help and exit." >&2 + echo " -o --host - Target host. Default: '${host}'." >&2 + echo " -r --release - Debian release. Default: '${release}'." >&2 + echo " -s --tftp-server - TFTP server. Default: '${tftp_server}'." >&2 + echo " -t --type - Release type {$(clean_ws ${types})}." >&2 + echo " Default: '${type}'." >&2 + echo " -v --verbose - Verbose execution." 
>&2 + eval "${old_xtrace}" +} + +process_opts() { + local short_opts="c:hors:t:v" + local long_opts="config-file:,help,host:,release:,tftp-server:,type:,verbose" + + local opts + opts=$(getopt --options ${short_opts} --long ${long_opts} -n "${script_name}" -- "$@") + + eval set -- "${opts}" + + while true ; do + case "${1}" in + -c | --config-file) + config_file="${2}" + shift 2 + ;; + -h | --help) + usage=1 + shift + ;; + -o | --host) + host="${2}" + shift 2 + ;; + -r | --release) + release="${2}" + shift 2 + ;; + -s | --tftp-server) + tftp_server="${2}" + shift 2 + ;; + -t | --type) + type="${2}" + shift 2 + ;; + -v | --verbose) + set -x + verbose=1 + shift + ;; + --) + shift + break + ;; + *) + echo "${script_name}: ERROR: Internal opts: '${@}'" >&2 + exit 1 + ;; + esac + done +} + +on_exit() { + local result=${1} + + echo "${script_name}: Done: ${result}" >&2 +} + + +download_fedora_files() { + local cmd + local dl_root + + if [[ ${tftp_server} == "localhost" ]]; then + cmd='' + dl_root="$(pwd)" + else + cmd="ssh ${tftp_server} " + dl_root="/var/tftproot" + fi + + ${cmd} ls -l ${dl_root}/${host} + + ${cmd} dl_root=${dl_root} host=${host} files_url=${files_url} sums_url=${sums_url} 'bash -s' <<'EOF' + +set -e + +if [[ -f ${dl_root}/${host}/tdd-initrd \ + && -f ${dl_root}/${host}/tdd-kernel ]]; then + mv -f ${dl_root}/${host}/tdd-initrd ${dl_root}/${host}/tdd-initrd.old + mv -f ${dl_root}/${host}/tdd-kernel ${dl_root}/${host}/tdd-kernel.old +fi + +curl --silent --show-error --location ${f30_initrd} > ${dir}/f_initrd +curl --silent --show-error --location ${f30_kernel} > ${dir}/f_kernel + +wget --no-verbose -O ${dl_root}/${host}/tdd-initrd ${dl_initrd} +wget --no-verbose -O ${dl_root}/${host}/tdd-kernel ${dl_kernel} +wget --no-verbose -O /tmp/di-sums ${sums_url}/MD5SUMS + +echo "--- initrd ---" +[[ -f ${dl_root}/${host}/tdd-initrd.old ]] && md5sum ${dl_root}/${host}/tdd-initrd.old +md5sum ${dl_root}/${host}/tdd-initrd +cat /tmp/di-sums | egrep 'netboot/debian-installer/arm64/initrd.gz' +echo "--- kernel ---" +[[ -f ${dl_root}/${host}/tdd-kernel.old ]] && md5sum ${dl_root}/${host}/tdd-kernel.old +md5sum ${dl_root}/${host}/tdd-kernel +cat /tmp/di-sums | egrep 'netboot/debian-installer/arm64/linux' +echo "---------" + +EOF +} + +#=============================================================================== +# program start +#=============================================================================== + +script_name="${0##*/}" +SCRIPTS_TOP=${SCRIPTS_TOP:-"$( cd "${BASH_SOURCE%/*}" && pwd )"} + +trap "on_exit 'failed.'" EXIT +set -e + +source ${SCRIPTS_TOP}/lib/util.sh + +process_opts "${@}" + +config_file="${config_file:-${SCRIPTS_TOP}/upload.conf}" + +check_file ${config_file} " --config-file" "usage" +source ${config_file} + +if [[ ! ${tftp_server} ]]; then + echo "${script_name}: ERROR: No tftp_server entry: '${config_file}'" >&2 + usage + exit 1 +fi + +if [[ ! ${host} ]]; then + echo "${script_name}: ERROR: No host entry: '${config_file}'" >&2 + usage + exit 1 +fi + +types=" + f28 + f30 + daily? + rawhide? 
+" + +type=${type:-"f30"} + +case "${type}" in +f30) + + dl_url="https://download.fedoraproject.org/pub/fedora/linux/releases/${type}/Server/aarch64" + dl_initrd="${dl_url}/os/images/pxeboot/initrd.img" + dl_kernel="${dl_url}/os/images/pxeboot/vmlinuz" + ;; +daily) + release="daily" + files_url="https://d-i.debian.org/daily-images/arm64/${release}/netboot/debian-installer/arm64" + sums_url="https://d-i.debian.org/daily-images/arm64/${release}" + ;; +sid) + echo "${script_name}: ERROR: No sid support yet." >&2 + exit 1 + ;; +*) + echo "${script_name}: ERROR: Unknown type '${type}'" >&2 + usage + exit 1 + ;; +esac + +if [[ -n "${usage}" ]]; then + usage + exit 0 +fi + +download_fedora_files + +echo "${script_name}: ${host} files ready on ${tftp_server}." >&2 + +trap "on_exit 'success.'" EXIT +exit 0 + diff --git a/scripts/upload.conf-sample b/scripts/upload.conf-sample new file mode 100644 index 00000000..b18219ad --- /dev/null +++ b/scripts/upload.conf-sample @@ -0,0 +1,2 @@ +tftp_server=${tftp_server:="user@server"} +host=${host:="saber1"} diff --git a/scripts/useradd-jenkins.sh b/scripts/useradd-jenkins.sh new file mode 100755 index 00000000..1a0b42ed --- /dev/null +++ b/scripts/useradd-jenkins.sh @@ -0,0 +1,230 @@ +#!/usr/bin/env bash + +set -e + +script_name="${0##*/}" + +SCRIPTS_TOP=${SCRIPTS_TOP:-"$( cd "${BASH_SOURCE%/*}" && pwd )"} + +source ${SCRIPTS_TOP}/lib/util.sh + +usage() { + local old_xtrace + old_xtrace="$(shopt -po xtrace || :)" + set +o xtrace + + if [[ -z ${password} ]]; then + local p + else + local p='*******' + fi + + echo "${script_name} - Adds a TDD jenkins user to system." >&2 + echo "Usage: ${script_name} [flags]" >&2 + echo "Option flags:" >&2 + echo " -c --check - Only run checks then exit." >&2 + echo " -d --delete - Delete user '${user}' from system." >&2 + echo " -e --home - home. Default: '${home}'." >&2 + echo " -g --gid - GID. Default: '${gid}'." >&2 + echo " -h --help - Show this help and exit." >&2 + echo " -n --np-sudo - Setup NOPASSWD sudo. Default: '${np_sudo}'." >&2 + echo " -p --group - Group. Default: '${group}'." >&2 + echo " -r --user - User. Default: '${user}'." >&2 + echo " -s --sudo - Setup sudo. Default: '${sudo}'." >&2 + echo " -u --uid - UID. Default: '${uid}'." >&2 + echo " -w --password - Account password. Default: '${p}'." >&2 + echo "Environment:" >&2 + echo " JENKINS_USER - Default: '${JENKINS_USER}'" >&2 + eval "${old_xtrace}" +} + +short_opts="cde:g:hnp:r:su:w:" +long_opts="check,delete,home:,gid:,help,np-sudo,group:,user:,sudo,uid:,password:" +opts=$(getopt --options ${short_opts} --long ${long_opts} -n "${script_name}" -- "$@") + +if [ $? 
!= 0 ]; then + echo "${script_name}: ERROR: Internal getopt" >&2 + exit 1 +fi + +eval set -- "${opts}" + +while true ; do + case "${1}" in + -c | --check) + check=1 + shift + ;; + -d | --delete) + delete=1 + shift + ;; + -e | --home) + home="${2}" + shift 2 + ;; + -g | --gid) + gid="${2}" + shift 2 + ;; + -h | --help) + usage=1 + shift + ;; + -n | --np-sudo) + np_sudo=1 + shift + ;; + -p | --group) + group="${2}" + shift 2 + ;; + -r | --user) + user="${2}" + shift 2 + ;; + -s | --sudo) + sudo=1 + shift + ;; + -u | --uid) + uid="${2}" + shift 2 + ;; + -w | --password) + password="${2}" + shift 2 + ;; + --) + shift + break + ;; + *) + echo "${script_name}: ERROR: Internal opts: '${@}'" >&2 + exit 1 + ;; + esac +done + +user=${user:-"${JENKINS_USER}"} +user=${user:-'tdd-jenkins'} +uid=${uid:-"5522"} + +home=${home:-"/home/${user}"} +group=${group:-"${user}"} +gid=${gid:-"${uid}"} + +if [[ -n "${usage}" ]]; then + usage + exit 0 +fi + +run_checks() { + local result + local check_msg + + if [[ ${check} ]]; then + check_msg='INFO' + else + check_msg='ERROR' + fi + + if getent passwd ${user} &> /dev/null; then + echo "${script_name}: ${check_msg}: user '${user}' exists." >&2 + echo "${script_name}: ${check_msg}: => $(id ${user})" >&2 + result=1 + else + echo "${script_name}: INFO: user '${user}' does not exist." >&2 + fi + + if getent group ${uid} &> /dev/null; then + echo "${script_name}: ${check_msg}: uid ${uid} exists." >&2 + result=1 + else + echo "${script_name}: INFO: uid ${uid} does not exist." >&2 + fi + + if [[ -d ${home} ]]; then + echo "${script_name}: ${check_msg}: home '${home}' exists." >&2 + result=1 + else + echo "${script_name}: INFO: home '${home}' does not exist." >&2 + fi + + if getent group ${group} &> /dev/null; then + echo "${script_name}: ${check_msg}: group '${group}' exists." >&2 + result=1 + else + echo "${script_name}: INFO: group '${group}' does not exist." >&2 + fi + + if getent group ${gid} &> /dev/null; then + echo "${script_name}: ${check_msg}: gid ${gid} exists." >&2 + result=1 + else + echo "${script_name}: INFO: gid ${gid} does not exist." >&2 + fi + + if [[ -f /etc/sudoers.d/${user} ]]; then + echo "${script_name}: ${check_msg}: sudoers '/etc/sudoers.d/${user}' exists." >&2 + result=1 + else + echo "${script_name}: INFO: sudoers '/etc/sudoers.d/${user}' does not exist." >&2 + fi + + if [[ ${result} ]]; then + return 1 + fi + + echo "${script_name}: INFO: Checks OK." >&2 + + return 0 +} + +if [[ ${delete} ]]; then + set -x + + userdel ${user} + rm -rf ${home} + rm -f /etc/sudoers.d/${user} + + exit 0 +fi + +result=$(run_checks) + +if [[ ${result} ]]; then + exit 1 +fi + +if [[ ${check} ]]; then + exit 0 +fi + +set -x + +groupadd --gid=${gid} ${group} +useradd --create-home --home-dir=${home} \ + --uid=${uid} --gid=${gid} --groups='docker' \ + --shell=/bin/bash ${user} + +if [[ ${sudo} || ${np_sudo} ]]; then + usermod --append --groups='sudo' ${user} +fi + +if [[ ${np_sudo} ]]; then + echo "%${user} ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/${user} +fi + +if [[ ${no_lecture} ]]; then # TODO + echo 'Defaults lecture = never' > /etc/sudoers.d/lecture +fi + +old_xtrace="$(shopt -po xtrace || :)" +set +o xtrace +if [[ -n ${password} ]]; then + echo "${user}:${password}" | chpasswd +fi +eval "${old_xtrace}" + +echo "${script_name}: INFO: Done OK." >&2 diff --git a/vars b/vars new file mode 120000 index 00000000..733c8f5f --- /dev/null +++ b/vars @@ -0,0 +1 @@ +jenkins/vars \ No newline at end of file