mirror of
https://github.com/XuehaiPan/nvitop.git
synced 2026-05-15 14:15:55 -06:00
deps(cachetools): remove third-party dependency cachetools (#147)
This commit is contained in:
parent
0bcb5e0260
commit
652859c84b
18 changed files with 352 additions and 22 deletions
|
|
@ -25,7 +25,7 @@ repos:
|
|||
- id: debug-statements
|
||||
- id: double-quote-string-fixer
|
||||
- repo: https://github.com/astral-sh/ruff-pre-commit
|
||||
rev: v0.9.2
|
||||
rev: v0.9.3
|
||||
hooks:
|
||||
- id: ruff
|
||||
args: [--fix, --exit-non-zero-on-fix]
|
||||
|
|
@ -53,7 +53,7 @@ repos:
|
|||
^docs/source/conf.py$
|
||||
)
|
||||
- repo: https://github.com/codespell-project/codespell
|
||||
rev: v2.3.0
|
||||
rev: v2.4.0
|
||||
hooks:
|
||||
- id: codespell
|
||||
additional_dependencies: [".[toml]"]
|
||||
|
|
|
|||
|
|
@ -25,7 +25,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
|||
|
||||
### Removed
|
||||
|
||||
-
|
||||
- Remove third-party dependency `cachetools` by [@XuehaiPan](https://github.com/XuehaiPan) in [#147](https://github.com/XuehaiPan/nvitop/pull/147).
|
||||
|
||||
------
|
||||
|
||||
|
|
|
|||
|
|
@ -115,7 +115,6 @@ An interactive NVIDIA-GPU process viewer and beyond, the one-stop solution for G
|
|||
- NVIDIA Management Library (NVML)
|
||||
- nvidia-ml-py
|
||||
- psutil
|
||||
- cachetools
|
||||
- termcolor
|
||||
- curses<sup>[*](#curses)</sup> (with `libncursesw`)
|
||||
|
||||
|
|
|
|||
10
docs/source/api/caching.rst
Normal file
10
docs/source/api/caching.rst
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
nvitop.caching module
|
||||
---------------------
|
||||
|
||||
.. currentmodule:: nvitop
|
||||
|
||||
.. autosummary::
|
||||
|
||||
ttl_cache
|
||||
|
||||
.. autofunction:: nvitop.ttl_cache
|
||||
|
|
@ -179,6 +179,7 @@ Please refer to section `More than a Monitor <https://github.com/XuehaiPan/nvito
|
|||
api/libnvml
|
||||
api/libcuda
|
||||
api/libcudart
|
||||
api/caching
|
||||
api/utils
|
||||
select
|
||||
callbacks
|
||||
|
|
|
|||
|
|
@ -155,3 +155,6 @@ api
|
|||
utils
|
||||
GpuStatsLogger
|
||||
hostname
|
||||
len
|
||||
maxsize
|
||||
reentrant
|
||||
|
|
|
|||
|
|
@ -20,7 +20,17 @@ import sys
|
|||
|
||||
from nvitop import api
|
||||
from nvitop.api import * # noqa: F403
|
||||
from nvitop.api import collector, device, host, libcuda, libcudart, libnvml, process, utils
|
||||
from nvitop.api import (
|
||||
caching,
|
||||
collector,
|
||||
device,
|
||||
host,
|
||||
libcuda,
|
||||
libcudart,
|
||||
libnvml,
|
||||
process,
|
||||
utils,
|
||||
)
|
||||
from nvitop.select import select_devices
|
||||
from nvitop.version import __version__
|
||||
|
||||
|
|
@ -28,7 +38,7 @@ from nvitop.version import __version__
|
|||
__all__ = [*api.__all__, 'select_devices']
|
||||
|
||||
# Add submodules to the top-level namespace
|
||||
for submodule in (collector, device, host, libcuda, libcudart, libnvml, process, utils):
|
||||
for submodule in (caching, collector, device, host, libcuda, libcudart, libnvml, process, utils):
|
||||
sys.modules[f'{__name__}.{submodule.__name__.rpartition(".")[-1]}'] = submodule
|
||||
|
||||
# Remove the nvitop.select module from sys.modules
|
||||
|
|
|
|||
|
|
@ -16,7 +16,18 @@
|
|||
# ==============================================================================
|
||||
"""The core APIs of nvitop."""
|
||||
|
||||
from nvitop.api import collector, device, host, libcuda, libcudart, libnvml, process, utils
|
||||
from nvitop.api import (
|
||||
caching,
|
||||
collector,
|
||||
device,
|
||||
host,
|
||||
libcuda,
|
||||
libcudart,
|
||||
libnvml,
|
||||
process,
|
||||
utils,
|
||||
)
|
||||
from nvitop.api.caching import ttl_cache
|
||||
from nvitop.api.collector import ResourceMetricCollector, collect_in_background, take_snapshots
|
||||
from nvitop.api.device import (
|
||||
CudaDevice,
|
||||
|
|
@ -76,6 +87,8 @@ __all__ = [
|
|||
'take_snapshots',
|
||||
'collect_in_background',
|
||||
'ResourceMetricCollector',
|
||||
# nvitop.api.caching
|
||||
'ttl_cache',
|
||||
# nvitop.api.utils
|
||||
'NA',
|
||||
'NaType',
|
||||
|
|
|
|||
279
nvitop/api/caching.py
Normal file
279
nvitop/api/caching.py
Normal file
|
|
@ -0,0 +1,279 @@
|
|||
# This file is part of nvitop, the interactive NVIDIA-GPU process viewer.
|
||||
#
|
||||
# Copyright 2021-2025 Xuehai Pan. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ==============================================================================
|
||||
"""Caching utilities."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import builtins
|
||||
import functools
|
||||
import time
|
||||
from threading import RLock
|
||||
from typing import TYPE_CHECKING, Any, NamedTuple, overload
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from collections.abc import Callable, Hashable, Sized
|
||||
from collections.abc import Set as AbstractSet
|
||||
from typing import TypeVar
|
||||
from typing_extensions import ParamSpec, Self
|
||||
|
||||
_P = ParamSpec('_P')
|
||||
_T = TypeVar('_T')
|
||||
|
||||
|
||||
__all__ = ['ttl_cache']
|
||||
|
||||
|
||||
class _CacheInfo(NamedTuple):
    """A named tuple representing the cache statistics."""

    hits: int  # number of calls answered directly from the cache
    misses: int  # number of calls that had to invoke the wrapped function
    maxsize: int  # configured upper bound on the number of cached entries
    currsize: int  # number of entries currently stored in the cache
||||
|
||||
|
||||
# Reuse CPython's private key-making helper when available; otherwise fall back
# to a vendored copy of the same code so behavior matches ``functools.lru_cache``.
# NOTE(review): ``functools._make_key`` is a private CPython API — the fallback
# below is what keeps this module safe if a future Python release removes it.
try:
    from functools import _make_key
except ImportError:

    class _HashedSeq(list):
        """This class guarantees that hash() will be called no more than once per element."""

        __slots__ = ('__hashvalue',)

        def __init__(
            self,
            seq: tuple[Any, ...],
            hash: Callable[[Any], int] = builtins.hash,  # pylint: disable=redefined-builtin
        ) -> None:
            """Initialize the hashed sequence."""
            # Copy the tuple's items into the list and cache the hash up front.
            self[:] = seq
            self.__hashvalue = hash(seq)

        def __hash__(self) -> int:  # type: ignore[override]
            """Return the hash value of the hashed sequence."""
            return self.__hashvalue

    # Sentinel separating positional args from keyword items in the flat key.
    _KWD_MARK = object()

    # pylint: disable-next=too-many-arguments
    def _make_key(  # type: ignore[misc]
        args: tuple[Hashable, ...],
        kwds: dict[str, Hashable],
        typed: bool,
        *,
        # The keyword-only defaults below bind globals/builtins as locals once,
        # the same micro-optimization CPython uses for this hot helper.
        kwd_mark: tuple[object, ...] = (_KWD_MARK,),
        fasttypes: AbstractSet[type] = frozenset({int, str}),
        tuple: type[tuple] = builtins.tuple,  # pylint: disable=redefined-builtin
        type: type[type] = builtins.type,  # pylint: disable=redefined-builtin
        len: Callable[[Sized], int] = builtins.len,  # pylint: disable=redefined-builtin
    ) -> Hashable:
        """Make a cache key from optionally typed positional and keyword arguments."""
        key = args
        if kwds:
            key += kwd_mark
            for item in kwds.items():
                key += item
        if typed:
            # Distinguish e.g. f(1) from f(1.0) by appending argument types.
            key += tuple(type(v) for v in args)
            if kwds:
                key += tuple(type(v) for v in kwds.values())
        elif len(key) == 1 and type(key[0]) in fasttypes:
            # Single int/str argument: its own hash is already cached by CPython.
            return key[0]
        return _HashedSeq(key)
||||
|
||||
|
||||
class _TTLCacheLink:  # pylint: disable=too-few-public-methods
    """A node of the circular doubly linked list used by :func:`ttl_cache`.

    Links are kept in insertion/refresh order, so the node after the root is
    always the oldest (first to expire) entry.
    """

    __slots__ = ('expires', 'key', 'next', 'prev', 'value')

    # pylint: disable-next=too-many-arguments,too-many-positional-arguments
    def __init__(
        self,
        prev: Self | None,
        next: Self | None,  # pylint: disable=redefined-builtin
        key: Hashable,
        value: Any,
        expires: float | None,
    ) -> None:
        # ``None`` is only used while constructing the root link / fresh nodes;
        # the cache immediately rewires ``prev``/``next`` into the ring.
        self.prev: Self = prev  # type: ignore[assignment]
        self.next: Self = next  # type: ignore[assignment]
        self.key: Hashable = key
        self.value: Any = value
        self.expires: float = expires  # type: ignore[assignment]
||||
|
||||
|
||||
@overload
def ttl_cache(
    maxsize: int | None = 128,
    ttl: float = 600.0,
    timer: Callable[[], float] = time.monotonic,
    typed: bool = False,
) -> Callable[[Callable[_P, _T]], Callable[_P, _T]]: ...


@overload
def ttl_cache(
    maxsize: Callable[_P, _T],
    ttl: float = 600.0,
    timer: Callable[[], float] = time.monotonic,
    typed: bool = False,
) -> Callable[_P, _T]: ...


# pylint: disable-next=too-many-statements
def ttl_cache(
    maxsize: int | Callable[_P, _T] | None = 128,
    ttl: float = 600.0,
    timer: Callable[[], float] = time.monotonic,
    typed: bool = False,
) -> Callable[[Callable[_P, _T]], Callable[_P, _T]] | Callable[_P, _T]:
    """Time aware cache decorator.

    A drop-in replacement for ``cachetools.func.ttl_cache`` built on the same
    circular-doubly-linked-list design as :func:`functools.lru_cache`, with an
    extra per-entry expiration time. Entries older than ``ttl`` seconds (as
    measured by ``timer``) are evicted lazily on lookup/insertion.

    Args:
        maxsize: Maximum number of live entries, ``None`` for unbounded, or the
            function itself when used as a bare ``@ttl_cache`` decorator.
        ttl: Time-to-live of each entry in ``timer`` units (must be >= 0).
        timer: Monotonic clock used for expiration checks.
        typed: If true, arguments of different types are cached separately.

    Returns:
        A decorator (or, for bare usage, the already-wrapped function). The
        wrapped function exposes ``cache_info()``, ``cache_clear()`` and
        ``cache_parameters()`` like :func:`functools.lru_cache`.

    Raises:
        TypeError: If ``maxsize`` is not an integer, callable, or ``None``, or
            if ``timer`` is not callable.
        ValueError: If ``ttl`` is negative.
    """
    if isinstance(maxsize, int):
        # Negative maxsize is treated as 0
        maxsize = max(0, maxsize)
    elif callable(maxsize) and isinstance(typed, bool):
        # The user_function was passed in directly via the maxsize argument
        func, maxsize = maxsize, 128
        return ttl_cache(maxsize, ttl=ttl, timer=timer, typed=typed)(func)
    elif maxsize is not None:
        raise TypeError('Expected first argument to be an integer, a callable, or None')

    if ttl < 0.0:
        raise ValueError('TTL must be a non-negative number')
    if not callable(timer):
        raise TypeError('Timer must be a callable')

    if maxsize == 0 or maxsize is None:
        # No eviction bookkeeping needed: a disabled (0) or unbounded (None)
        # cache never expires entries, so plain lru_cache suffices.
        return functools.lru_cache(maxsize=maxsize, typed=typed)  # type: ignore[return-value]

    # pylint: disable-next=too-many-statements,too-many-locals
    def wrapper(func: Callable[_P, _T]) -> Callable[_P, _T]:
        cache: dict[Any, _TTLCacheLink] = {}
        cache_get = cache.get  # bound method to lookup a key or return None
        cache_len = cache.__len__  # get cache size without calling len()
        lock = RLock()  # because linked-list updates aren't thread-safe
        root = _TTLCacheLink(*((None,) * 5))  # root of the circular doubly linked list
        root.prev = root.next = root  # initialize by pointing to self
        hits = misses = 0
        full = False

        def unlink(link: _TTLCacheLink) -> _TTLCacheLink:
            # Remove ``link`` from the ring; returns its successor so callers
            # can keep walking forward while evicting.
            with lock:
                link_prev, link_next = link.prev, link.next
                link_next.prev, link_prev.next = link_prev, link_next
            return link_next

        def append(link: _TTLCacheLink) -> _TTLCacheLink:
            # Insert ``link`` just before the root, i.e. at the back of the
            # expiration queue (newest entry).
            with lock:
                last = root.prev
                last.next = root.prev = link
                link.prev, link.next = last, root
            return link

        def move_to_end(link: _TTLCacheLink) -> _TTLCacheLink:
            # Refresh ``link``'s position so it expires last.
            with lock:
                unlink(link)
                append(link)
            return link

        def expire() -> None:
            # Drop every entry at the front of the ring whose deadline passed.
            nonlocal full

            with lock:
                now = timer()
                front = root.next
                while front is not root and front.expires < now:
                    del cache[front.key]
                    front = unlink(front)
                full = cache_len() >= maxsize

        @functools.wraps(func)
        def wrapped(*args: _P.args, **kwargs: _P.kwargs) -> _T:
            # Size limited time aware caching
            nonlocal root, hits, misses, full

            key = _make_key(args, kwargs, typed)
            with lock:
                link = cache_get(key)
                if link is not None:
                    if timer() < link.expires:
                        hits += 1
                        return link.value
                    # Stale hit: purge expired entries, then fall through to
                    # recompute below.
                    expire()

            # The user function runs OUTSIDE the lock (like lru_cache), so two
            # threads may compute the same key concurrently; the second writer
            # wins in the ``key in cache`` branch below.
            misses += 1
            result = func(*args, **kwargs)
            expires = timer() + ttl
            with lock:
                if key in cache:
                    # Getting here means that this same key was added to the cache while the lock
                    # was released or the key was expired. Move the link to the front of the
                    # circular queue.
                    link = move_to_end(cache[key])
                    # We need only update the expiration time.
                    link.value = result
                    link.expires = expires
                else:
                    if full:
                        expire()
                    if full:
                        # Use the old root to store the new key and result.
                        root.key = key
                        root.value = result
                        root.expires = expires
                        # Empty the oldest link and make it the new root.
                        # Keep a reference to the old key and old result to prevent their ref counts
                        # from going to zero during the update. That will prevent potentially
                        # arbitrary object clean-up code (i.e. __del__) from running while we're
                        # still adjusting the links.
                        # NOTE(review): unlike lru_cache, the old *value*
                        # reference is dropped here (``front.value = None``)
                        # while the lock is still held — confirm upstream that
                        # value ``__del__`` reentrancy is acceptable.
                        front = root.next
                        old_key = front.key
                        front.key = front.value = front.expires = None  # type: ignore[assignment]
                        # Now update the cache dictionary.
                        del cache[old_key]
                        # Save the potentially reentrant cache[key] assignment for last, after the
                        # root and links have been put in a consistent state.
                        cache[key], root = root, front
                    else:
                        # Put result in a new link at the front of the queue.
                        cache[key] = append(_TTLCacheLink(None, None, key, result, expires))
                    full = cache_len() >= maxsize
            return result

        def cache_info() -> _CacheInfo:
            """Report cache statistics."""
            with lock:
                # Purge first so ``currsize`` only counts live entries.
                expire()
                return _CacheInfo(hits, misses, maxsize, cache_len())

        def cache_clear() -> None:
            """Clear the cache and cache statistics."""
            nonlocal hits, misses, full
            with lock:
                cache.clear()
                root.prev = root.next = root
                root.key = root.value = root.expires = None  # type: ignore[assignment]
                hits = misses = 0
                full = False

        wrapped.cache_info = cache_info  # type: ignore[attr-defined]
        wrapped.cache_clear = cache_clear  # type: ignore[attr-defined]
        wrapped.cache_parameters = lambda: {'maxsize': maxsize, 'typed': typed}  # type: ignore[attr-defined]
        return wrapped

    return wrapper
|
||||
|
|
@ -39,5 +39,6 @@ from nvitop.gui.library.utils import (
|
|||
cut_string,
|
||||
make_bar,
|
||||
set_color,
|
||||
ttl_cache,
|
||||
)
|
||||
from nvitop.gui.library.widestring import WideString, wcslen
|
||||
|
|
|
|||
|
|
@ -3,9 +3,7 @@
|
|||
|
||||
# pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring
|
||||
|
||||
from cachetools.func import ttl_cache
|
||||
|
||||
from nvitop.api import NA, libnvml, utilization2string
|
||||
from nvitop.api import NA, libnvml, ttl_cache, utilization2string
|
||||
from nvitop.api import MigDevice as MigDeviceBase
|
||||
from nvitop.api import PhysicalDevice as DeviceBase
|
||||
from nvitop.gui.library.process import GpuProcess
|
||||
|
|
|
|||
|
|
@ -3,7 +3,6 @@
|
|||
|
||||
# pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring
|
||||
|
||||
|
||||
from nvitop.api import (
|
||||
NA,
|
||||
GiB,
|
||||
|
|
|
|||
|
|
@ -7,10 +7,25 @@ import contextlib
|
|||
import math
|
||||
import os
|
||||
|
||||
from nvitop.api import NA, colored, host, set_color # noqa: F401 # pylint: disable=unused-import
|
||||
from nvitop.api import NA, colored, host, set_color, ttl_cache
|
||||
from nvitop.gui.library.widestring import WideString
|
||||
|
||||
|
||||
__all__ = [
|
||||
'NA',
|
||||
'USERNAME',
|
||||
'HOSTNAME',
|
||||
'SUPERUSER',
|
||||
'USERCONTEXT',
|
||||
'LARGE_INTEGER',
|
||||
'ttl_cache',
|
||||
'colored',
|
||||
'set_color',
|
||||
'cut_string',
|
||||
'make_bar',
|
||||
]
|
||||
|
||||
|
||||
USERNAME = 'N/A'
|
||||
with contextlib.suppress(ImportError, OSError):
|
||||
USERNAME = host.getuser()
|
||||
|
|
|
|||
|
|
@ -6,9 +6,16 @@
|
|||
import threading
|
||||
import time
|
||||
|
||||
from cachetools.func import ttl_cache
|
||||
|
||||
from nvitop.gui.library import NA, Device, Displayable, colored, cut_string, host, make_bar
|
||||
from nvitop.gui.library import (
|
||||
NA,
|
||||
Device,
|
||||
Displayable,
|
||||
colored,
|
||||
cut_string,
|
||||
host,
|
||||
make_bar,
|
||||
ttl_cache,
|
||||
)
|
||||
from nvitop.version import __version__
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -11,8 +11,6 @@ import time
|
|||
from operator import attrgetter, xor
|
||||
from typing import TYPE_CHECKING, Any, NamedTuple
|
||||
|
||||
from cachetools.func import ttl_cache
|
||||
|
||||
from nvitop.gui.library import (
|
||||
HOSTNAME,
|
||||
LARGE_INTEGER,
|
||||
|
|
@ -27,6 +25,7 @@ from nvitop.gui.library import (
|
|||
colored,
|
||||
cut_string,
|
||||
host,
|
||||
ttl_cache,
|
||||
wcslen,
|
||||
)
|
||||
|
||||
|
|
|
|||
|
|
@ -9,8 +9,6 @@ from collections import deque
|
|||
from functools import partial
|
||||
from itertools import islice
|
||||
|
||||
from cachetools.func import ttl_cache
|
||||
|
||||
from nvitop.gui.library import (
|
||||
NA,
|
||||
SUPERUSER,
|
||||
|
|
@ -22,6 +20,7 @@ from nvitop.gui.library import (
|
|||
WideString,
|
||||
host,
|
||||
send_signal,
|
||||
ttl_cache,
|
||||
)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -49,7 +49,6 @@ dependencies = [
|
|||
# Sync with nvitop/version.py and requirements.txt
|
||||
"nvidia-ml-py >= 11.450.51, < 12.561.0a0",
|
||||
"psutil >= 5.6.6",
|
||||
"cachetools >= 1.0.1",
|
||||
"termcolor >= 1.0.0",
|
||||
"colorama >= 0.4.0; platform_system == 'Windows'",
|
||||
"windows-curses >= 2.2.0; platform_system == 'Windows'",
|
||||
|
|
@ -204,7 +203,6 @@ ignore = [
|
|||
|
||||
[tool.ruff.lint.isort]
|
||||
known-first-party = ["nvitop", "nvitop_exporter"]
|
||||
known-local-folder = ["nvitop", "nvitop-exporter"]
|
||||
extra-standard-library = ["typing_extensions"]
|
||||
lines-after-imports = 2
|
||||
|
||||
|
|
|
|||
|
|
@ -1,7 +1,6 @@
|
|||
# Sync with pyproject.toml and nvitop/version.py
|
||||
nvidia-ml-py >= 11.450.51, < 12.561.0a0
|
||||
psutil >= 5.6.6
|
||||
cachetools >= 1.0.1
|
||||
termcolor >= 1.0.0
|
||||
colorama >= 0.4.0; platform_system == 'Windows'
|
||||
windows-curses >= 2.2.0; platform_system == 'Windows'
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue