chore: update to torch 2.4 (#2259)

* chore: update to torch 2.4

* remove un-necessary patch

* fix
This commit is contained in:
OlivierDehaene 2024-07-23 20:39:43 +00:00 committed by yuanwu
parent b1077b077c
commit 34c472bd64
3 changed files with 3 additions and 6 deletions

View File

@@ -30,7 +30,6 @@ install: install-cuda
 install-cuda: install-server install-flash-attention-v2-cuda install-vllm-cuda install-flash-attention install-fbgemm
 	pip install -e ".[bnb]"
-	pip install nvidia-nccl-cu12==2.22.3
 install-rocm: install-server install-flash-attention-v2-rocm install-vllm-rocm

View File

@@ -1,10 +1,8 @@
-fbgemm_commit := 9cf0429b726931cfab72b8264730bea682f32fca
+fbgemm_commit := ddac8dd9fc0bee70a3f456df68b8aac38576c856
 build-fbgemm:
-	chmod +x fix_torch90a.sh && ./fix_torch90a.sh && \
 	git clone https://github.com/pytorch/FBGEMM.git fbgemm && \
-	cp fbgemm_remove_unused.patch fbgemm && \
-	cd fbgemm && git fetch && git checkout $(fbgemm_commit) && git apply fbgemm_remove_unused.patch && \
+	cd fbgemm && git fetch && git checkout $(fbgemm_commit) && \
 	git submodule update --init --recursive && \
 	cd fbgemm_gpu && \
 	pip install -r requirements.txt && \

View File

@@ -31,7 +31,7 @@ einops = "^0.6.1"
 texttable = { version = "^1.6.7", optional = true }
 datasets = { version = "^2.14.0", optional = true }
 peft = { version = "^0.10", optional = true }
-torch = { version = "^2.3.0", optional = true }
+torch = { version = "^2.4.0", optional = true }
 scipy = "^1.11.1"
 pillow = "^10.0.0"
 outlines= { version = "^0.0.34", optional = true }