- Clone repo
$ mkdir -p $GOPATH/src/sigs.k8s.io
$ git clone https://github.com/kubernetes-sigs/azurelustre-csi-driver $GOPATH/src/sigs.k8s.io/azurelustre-csi-driver
- Build azurelustre Storage CSI driver
$ cd $GOPATH/src/sigs.k8s.io/azurelustre-csi-driver
$ make azurelustre
- Run verification before sending PR
$ make verify
- Build container image and push to Docker Hub
$ export REGISTRY_NAME=<dockerhub-alias>
$ make push-latest
- Install CSC
Install the csc tool according to https://github.com/rexray/gocsi/tree/master/csc:
$ mkdir -p $GOPATH/src/github.com/rexray
$ cd $GOPATH/src/github.com/rexray
$ git clone https://github.com/rexray/gocsi.git
$ cd gocsi/csc
$ make build
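Assuming make build drops the csc binary in the current directory (this may vary with the gocsi version), copy it onto your PATH so the commands below resolve:
$ sudo cp ./csc /usr/local/bin/
$ csc --help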
- Setup variables
$ readonly volname="testvolume-$(date +%s)"
$ readonly cap="MULTI_NODE_MULTI_WRITER,mount,,,"
$ readonly target_path="/tmp/lustre-pv"
$ readonly endpoint="tcp://127.0.0.1:10000"
$ readonly lustre_fs_name=""
$ readonly lustre_fs_ip=""
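The last two variables must be filled in with your own Lustre filesystem name and MGS IP address. For example, with a hypothetical Azure Managed Lustre filesystem named "lustrefs" whose MGS is reachable at 10.0.0.4, they would be set as follows:
$ readonly lustre_fs_name="lustrefs"
$ readonly lustre_fs_ip="10.0.0.4"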
- Start CSI driver locally
$ cd $GOPATH/src/sigs.k8s.io/azurelustre-csi-driver
$ ./_output/azurelustreplugin --endpoint $endpoint --nodeid CSINode -v=5 &
Before running the CSI driver, create a "/etc/kubernetes/azure.json" file on the testing server (it's easiest to copy an azure.json file from a k8s cluster with a correctly configured service principal) and set AZURE_CREDENTIAL_FILE as follows:
$ export AZURE_CREDENTIAL_FILE=/etc/kubernetes/azure.json
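If you cannot copy an existing azure.json, a minimal file for a service-principal setup looks roughly like the sketch below; the exact set of fields your cluster needs may differ, and every value here is a placeholder:
$ sudo tee /etc/kubernetes/azure.json >/dev/null <<'EOF'
{
  "cloud": "AzurePublicCloud",
  "tenantId": "<tenant-id>",
  "subscriptionId": "<subscription-id>",
  "aadClientId": "<service-principal-app-id>",
  "aadClientSecret": "<service-principal-secret>",
  "resourceGroup": "<resource-group-of-the-cluster>",
  "location": "<azure-region>"
}
EOF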
- Get plugin info
$ csc identity plugin-info --endpoint $endpoint
- Create an azurelustre volume
$ volumeid=$(csc controller new --endpoint $endpoint --cap $cap --req-bytes 2147483648 --params "fs-name=$lustre_fs_name,mgs-ip-address=$lustre_fs_ip" $volname | awk '{print $1}' | sed 's/"//g')
- Publish the volume
$ mkdir -p $target_path
$ csc node publish --endpoint $endpoint --cap $cap --target-path $target_path --vol-context "fs-name=$lustre_fs_name,mgs-ip-address=$lustre_fs_ip" $volumeid
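To confirm the publish actually mounted the Lustre filesystem at the target path, a quick (optional) check is:
$ mount | grep $target_path
$ df -h $target_path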
- Unpublish the volume
$ csc node unpublish --endpoint $endpoint --target-path $target_path $volumeid
- Delete the volume
$ csc controller del --endpoint $endpoint $volumeid
- Validate volume capabilities
$ csc controller validate-volume-capabilities --endpoint $endpoint --cap $cap $volumeid
- Get node info
$ csc node get-info --endpoint $endpoint
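When you are done testing, you can stop the locally running driver and remove the temporary mount directory (assuming the driver was started in the background as above):
$ pkill -f azurelustreplugin
$ rm -rf $target_path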