Commit b760a1d

Replaced numa with py-libnuma to work with Python 3.11 (#900)

* replaced numa with py-libnuma to work with Python 3.11
* update Dockerfile dependencies
1 parent f3b9ddd commit b760a1d

File tree

3 files changed: +10 -12 lines changed

docker/Dockerfile.intel (+1 -3)
```diff
@@ -27,8 +27,6 @@ RUN --mount=type=cache,id=apt-dev,target=/var/cache/apt \
     libpng-dev \
     python3 \
     python3-pip \
-    python3-dev \
-    libnuma-dev \
     && rm -rf /var/lib/apt/lists/*"
 RUN /usr/sbin/update-ccache-symlinks
 RUN mkdir /opt/ccache && ccache --set-config=cache_dir=/opt/ccache
@@ -46,7 +44,7 @@ RUN python3 -m pip install --no-cache-dir \
     -f https://download.pytorch.org/whl/torch_stable.html && \
     python3 -m pip install intel-extension-for-pytorch==$IPEX_VERSION && \
     python3 -m pip install oneccl_bind_pt --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/cpu/us/ && \
-    python3 -m pip install --no-cache-dir numa
+    python3 -m pip install --no-cache-dir py-libnuma
 
 ARG KMP_BLOCKTIME=1
 ENV KMP_BLOCKTIME=${KMP_BLOCKTIME}
```
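
Not part of the commit, but a quick sanity check after building the image: py-libnuma is installed under the PyPI name `py-libnuma` yet is imported as the `numa` module, so a short snippet run inside the container confirms the swap took effect.

```python
# Hypothetical post-build check (not part of the commit): confirm that
# py-libnuma is importable under the module name "numa" inside the image.
import numa

# numa.info.get_max_node() is one of the calls the patched code relies on;
# it returns the highest NUMA node index visible to the container.
print(numa.info.get_max_node())
```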

optimum/intel/utils/import_utils.py (+1 -1)
```diff
@@ -144,7 +144,7 @@
 
 if _numa_available:
     try:
-        importlib_metadata.version("numa")
+        importlib_metadata.version("py-libnuma")
     except importlib_metadata.PackageNotFoundError:
         _numa_available = False
 
```
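
The probe above keys off the PyPI distribution name rather than the import name. A minimal standalone sketch of the same pattern follows; the helper name is illustrative, not part of the repository.

```python
# Minimal sketch of the availability probe: the PyPI distribution is named
# "py-libnuma", while the module it installs is still imported as "numa".
import importlib.metadata as importlib_metadata


def numa_distribution_available() -> bool:  # illustrative helper name
    try:
        importlib_metadata.version("py-libnuma")
        return True
    except importlib_metadata.PackageNotFoundError:
        return False


print(numa_distribution_available())
```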

optimum/intel/utils/modeling_utils.py (+8 -8)
```diff
@@ -196,7 +196,7 @@ def bind_cores_for_best_perf():
 
     if not is_numa_available():
         logger.error("'numa' module not found")
-        raise ImportError("'numa' module not found, install with 'pip install numa'")
+        raise ImportError("'numa' module not found, install with 'pip install py-libnuma'")
     import numa
 
     local_size = get_int_from_env(
@@ -205,7 +205,7 @@ def bind_cores_for_best_perf():
     rank_id = get_int_from_env(
         ["LOCAL_RANK", "MPI_LOCALRANKID", "OMPI_COMM_WORLD_LOCAL_RANK", "MV2_COMM_WORLD_LOCAL_RANK"], 0
     )
-    nodes = numa.get_max_node() + 1
+    nodes = numa.info.get_max_node() + 1
     rank_per_node = math.ceil(local_size / nodes)
     num_cpus_per_nodes = int(psutil.cpu_count(logical=False) / nodes)
     node_id = int(rank_id / rank_per_node)
@@ -216,17 +216,17 @@ def bind_cores_for_best_perf():
     else:
         num_cpus_per_rank = int(os.getenv("OMP_NUM_THREADS"))
         logger.info(f"OMP_NUM_THREADS already set to {num_cpus_per_rank}")
-    if len(numa.get_membind()) == nodes:
+    if len(numa.memory.get_membind_nodes()) == nodes:
         # if numa memory binding is not set, set it to the node where the rank is running
-        numa.set_membind([node_id])
+        numa.memory.set_membind_nodes((node_id))
 
     torch.set_num_threads(num_cpus_per_rank)
 
-    if len(numa.get_affinity(0)) == psutil.cpu_count(logical=True):
+    if len(numa.schedule.get_affinitive_cpus(0)) == psutil.cpu_count(logical=True):
         # if numa affinity is unset (default value is set to all logical cores) set it to the physical cores assigned to the rank
         cpu_start = num_cpus_per_rank * rank_offset_per_node
-        numa.set_affinity(
+        numa.schedule.run_on_cpus(
             0,
-            list(numa.node_to_cpus(node_id))[cpu_start : cpu_start + num_cpus_per_rank],
+            *(numa.info.node_to_cpus(node_id)[cpu_start : cpu_start + num_cpus_per_rank]),
         )
-    logger.info(f"affinity={numa.get_affinity(0)}, membind = {numa.get_membind()}")
+    logger.info(f"affinity={numa.schedule.get_affinitive_cpus(0)}, membind = {numa.memory.get_membind_nodes()}")
```
