# Reconstructed from a garbled side-by-side diff extraction; this is the
# post-change (new) side of the hunk.
EAPI=8

# Python implementations this ebuild supports (python-single-r1).
PYTHON_COMPAT=( python3_{9..11} )

# `prefix` was added by this change to gain hprefixify for the hardcoded
# paths handled later in src_prepare.
inherit python-single-r1 cmake cuda flag-o-matic prefix

# Upstream tarball name differs from the package name.
MYPN=pytorch
MYP=${MYPN}-${PV}
... | ... | |
24 |
24 |
ffmpeg? ( opencv )
|
25 |
25 |
mpi? ( distributed )
|
26 |
26 |
tensorpipe? ( distributed )
|
|
27 |
distributed? ( tensorpipe )
|
27 |
28 |
gloo? ( distributed )
|
28 |
29 |
" # ?? ( cuda rocm )
|
29 |
30 |
|
... | ... | |
48 |
49 |
fbgemm? ( dev-libs/FBGEMM )
|
49 |
50 |
ffmpeg? ( media-video/ffmpeg:= )
|
50 |
51 |
gloo? ( sci-libs/gloo[cuda?] )
|
51 |
|
mpi? ( sys-cluster/openmpi )
|
|
52 |
mpi? ( virtual/mpi )
|
52 |
53 |
nnpack? ( sci-libs/NNPACK )
|
53 |
54 |
numpy? ( $(python_gen_cond_dep '
|
54 |
55 |
dev-python/numpy[${PYTHON_USEDEP}]
|
... | ... | |
56 |
57 |
opencl? ( virtual/opencl )
|
57 |
58 |
opencv? ( media-libs/opencv:= )
|
58 |
59 |
qnnpack? ( sci-libs/QNNPACK )
|
59 |
|
tensorpipe? ( sci-libs/tensorpipe )
|
|
60 |
tensorpipe? ( sci-libs/tensorpipe[cuda?] )
|
60 |
61 |
xnnpack? ( >=sci-libs/XNNPACK-2022.12.22 )
|
61 |
62 |
"
|
62 |
63 |
DEPEND="
|
... | ... | |
# Build/work directory: the unpacked upstream pytorch-${PV} tree.
S="${WORKDIR}"/${MYP}
# Post-change patch list: the ${P}-*.patch entries were renamed to
# version-pinned ${PN}-2.0.0-*.patch names by this diff.
PATCHES=(
	"${FILESDIR}"/${PN}-2.0.0-gentoo.patch
	"${FILESDIR}"/${PN}-1.13.0-install-dirs.patch
	"${FILESDIR}"/${PN}-1.12.0-glog-0.6.0.patch
	"${FILESDIR}"/${PN}-1.13.1-tensorpipe.patch
	"${FILESDIR}"/${PN}-2.0.0-gcc13.patch
	"${FILESDIR}"/${PN}-2.0.0-cudnn_include_fix.patch
)
88 |
89 |
|
89 |
90 |
src_prepare() {
|
... | ... | |
96 |
97 |
pushd torch/csrc/jit/serialization || die
|
97 |
98 |
flatc --cpp --gen-mutable --scoped-enums mobile_bytecode.fbs || die
|
98 |
99 |
popd
|
|
100 |
# prefixify the hardcoded paths, after all patches are applied
|
|
101 |
hprefixify \
|
|
102 |
aten/CMakeLists.txt \
|
|
103 |
caffe2/CMakeLists.txt \
|
|
104 |
cmake/Metal.cmake \
|
|
105 |
cmake/Modules/*.cmake \
|
|
106 |
cmake/Modules_CUDA_fix/FindCUDNN.cmake \
|
|
107 |
cmake/Modules_CUDA_fix/upstream/FindCUDA/make2cmake.cmake \
|
|
108 |
cmake/Modules_CUDA_fix/upstream/FindPackageHandleStandardArgs.cmake \
|
|
109 |
cmake/public/LoadHIP.cmake \
|
|
110 |
cmake/public/cuda.cmake \
|
|
111 |
cmake/Dependencies.cmake \
|
|
112 |
torch/CMakeLists.txt \
|
|
113 |
CMakeLists.txt
|
99 |
114 |
}
|
100 |
115 |
|
101 |
116 |
src_configure() {
|
... | ... | |
105 |
120 |
ewarn ""
|
106 |
121 |
ewarn "To configure caffe2 with the CUDA compute capability that is optimal for your GPU,"
|
107 |
122 |
ewarn "set TORCH_CUDA_ARCH_LIST in your make.conf, and re-emerge caffe2."
|
108 |
|
ewarn "For example, to use CUDA capability 7.5 & 3.5, add: TORCH_CUDA_ARCH_LIST=7.5,3.5"
|
|
123 |
ewarn "For example, to use CUDA capability 7.5 & 3.5, add: TORCH_CUDA_ARCH_LIST=7.5 3.5"
|
109 |
124 |
ewarn "For a Maxwell model GPU, an example value would be: TORCH_CUDA_ARCH_LIST=Maxwell"
|
110 |
125 |
ewarn ""
|
111 |
126 |
ewarn "You can look up your GPU's CUDA compute capability at https://developer.nvidia.com/cuda-gpus"
|