#!/bin/bash
# This is an install script for Alpaca_LoRA_4bit
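# Note: run this script from the root of the alpaca_lora_4bit checkout, so the
# patched files under ./peft/ and ./GPTQ-for-LLaMa/ referenced below can be found.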
# Create ./repository/ if it does not already exist
if [ ! -d "./repository" ]; then
mkdir ./repository
fi
# Clone the required repositories into ./repository/
git clone https://github.com/qwopqwop200/GPTQ-for-LLaMa.git ./repository/GPTQ-for-LLaMa
git clone https://github.com/huggingface/peft.git ./repository/peft
git clone https://github.com/huggingface/transformers.git ./repository/transformers
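# Note: these clones track the upstream default branches; the patched files copied
# below assume compatible upstream versions, so pinning to known-good commits may be needed.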
# Replace ./repository/peft/src/peft/tuners/lora.py with ./peft/tuners/lora.py
cp ./peft/tuners/lora.py ./repository/peft/src/peft/tuners/lora.py
# Replace the CUDA kernel sources in ./repository/GPTQ-for-LLaMa with the patched quant_cuda.cpp and quant_cuda_kernel.cu
cp ./GPTQ-for-LLaMa/quant_cuda.cpp ./repository/GPTQ-for-LLaMa/quant_cuda.cpp
cp ./GPTQ-for-LLaMa/quant_cuda_kernel.cu ./repository/GPTQ-for-LLaMa/quant_cuda_kernel.cu
# Copy the 4-bit autograd and gradient checkpointing modules into ./repository/GPTQ-for-LLaMa/
cp ./GPTQ-for-LLaMa/autograd_4bit.py ./repository/GPTQ-for-LLaMa/autograd_4bit.py
cp ./GPTQ-for-LLaMa/gradient_checkpointing.py ./repository/GPTQ-for-LLaMa/gradient_checkpointing.py
# Build and install the quant_cuda extension from ./repository/GPTQ-for-LLaMa
cd ./repository/GPTQ-for-LLaMa || exit 1
python setup_cuda.py install
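# Optional sanity check (assumption: setup_cuda.py installs an extension importable
# as "quant_cuda"); confirm the compiled module can be imported before finishing.
python -c "import quant_cuda" && echo "quant_cuda extension imported successfully"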
echo "Install finished"
read -p "Press [Enter] to continue..."