-
-
Notifications
You must be signed in to change notification settings - Fork 393
Expand file tree
/
Copy path_io_kqueue.py
More file actions
294 lines (252 loc) · 11.3 KB
/
_io_kqueue.py
File metadata and controls
294 lines (252 loc) · 11.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
from __future__ import annotations
import errno
import select
import sys
from contextlib import contextmanager
from typing import TYPE_CHECKING, Literal
import attrs
import outcome
from .. import _core
from ._run import _public
from ._wakeup_socketpair import WakeupSocketpair
if TYPE_CHECKING:
from collections.abc import Callable, Iterator
from typing_extensions import TypeAlias
from .._core import Abort, Task, UnboundedQueue
from .._file_io import _HasFileNo
assert not TYPE_CHECKING or (sys.platform != "linux" and sys.platform != "win32")
EventResult: TypeAlias = "list[select.kevent]"
@attrs.frozen(eq=False)
class _KqueueStatistics:
    """Snapshot of the kqueue I/O manager's state, as returned by
    ``KqueueIOManager.statistics()``.
    """

    # Number of tasks currently blocked waiting for an I/O event.
    tasks_waiting: int
    # Number of active monitor_kevent() queues.
    monitors: int
    # Tag identifying which I/O backend produced these statistics.
    backend: Literal["kqueue"] = attrs.field(init=False, default="kqueue")
@attrs.define(eq=False)
class KqueueIOManager:
    """I/O manager backend for platforms that provide ``kqueue``
    (macOS and the BSDs; the module-level assert excludes linux/win32).

    Interested parties register themselves in ``_registered`` under a
    ``(ident, filter)`` key; the run loop blocks in ``get_events`` and then
    hands the resulting kevents to ``process_events`` for dispatch.
    """

    # The kernel event queue itself.
    _kqueue: select.kqueue = attrs.Factory(select.kqueue)
    # {(ident, filter): Task or UnboundedQueue}
    _registered: dict[tuple[int, int], Task | UnboundedQueue[select.kevent]] = (
        attrs.Factory(dict)
    )
    # Socketpair whose read end is registered on the kqueue, so other
    # threads / signal handlers can interrupt a blocked get_events().
    _force_wakeup: WakeupSocketpair = attrs.Factory(WakeupSocketpair)
    # fd of the wakeup socket, cached so process_events can recognize it.
    _force_wakeup_fd: int | None = None

    def __attrs_post_init__(self) -> None:
        # Permanently register the wakeup socket for readability; this is
        # the only non-oneshot registration the manager itself creates.
        force_wakeup_event = select.kevent(
            self._force_wakeup.wakeup_sock,
            select.KQ_FILTER_READ,
            select.KQ_EV_ADD,
        )
        self._kqueue.control([force_wakeup_event], 0)
        self._force_wakeup_fd = self._force_wakeup.wakeup_sock.fileno()

    def statistics(self) -> _KqueueStatistics:
        """Count registered receivers, split by kind (Task vs. monitor queue)."""
        tasks_waiting = 0
        monitors = 0
        for receiver in self._registered.values():
            if type(receiver) is _core.Task:
                tasks_waiting += 1
            else:
                monitors += 1
        return _KqueueStatistics(tasks_waiting=tasks_waiting, monitors=monitors)

    def close(self) -> None:
        """Release the kqueue fd and the wakeup socketpair."""
        self._kqueue.close()
        self._force_wakeup.close()

    def force_wakeup(self) -> None:
        """Wake a blocked ``get_events``; safe from threads and signal handlers."""
        self._force_wakeup.wakeup_thread_and_signal_safe()

    def get_events(self, timeout: float) -> EventResult:
        """Wait up to *timeout* seconds and return all pending kevents."""
        # max_events must be > 0 or kqueue gets cranky
        # and we generally want this to be strictly larger than the actual
        # number of events we get, so that we can tell that we've gotten
        # all the events in just 1 call.
        max_events = len(self._registered) + 1
        events = []
        while True:
            batch = self._kqueue.control([], max_events, timeout)
            events += batch
            if len(batch) < max_events:
                # Batch was not full, so the kernel queue is drained.
                break
            else:  # TODO: test this line
                # Full batch: there may be more events queued, so poll
                # again without blocking.
                timeout = 0
                # and loop back to the start
        return events

    def process_events(self, events: EventResult) -> None:
        """Dispatch each kevent to the task or monitor queue registered for it."""
        for event in events:
            key = (event.ident, event.filter)
            if event.ident == self._force_wakeup_fd:
                # Just a wakeup poke, not a real registration: drain and skip.
                self._force_wakeup.drain()
                continue
            receiver = self._registered[key]
            if event.flags & select.KQ_EV_ONESHOT:  # TODO: test this branch
                # The kernel auto-deregisters oneshot events after delivery,
                # so drop our bookkeeping entry to stay in sync.
                del self._registered[key]
            if isinstance(receiver, _core.Task):
                _core.reschedule(receiver, outcome.Value(event))
            else:
                receiver.put_nowait(event)  # TODO: test this line

    # kevent registration is complicated -- e.g. aio submission can
    # implicitly perform a EV_ADD, and EVFILT_PROC with NOTE_TRACK will
    # automatically register filters for child processes. So our lowlevel
    # API is *very* low-level: we expose the kqueue itself for adding
    # events or sticking into AIO submission structs, and split waiting
    # off into separate methods. It's your responsibility to make sure
    # that handle_io never receives an event without a corresponding
    # registration! This may be challenging if you want to be careful
    # about e.g. KeyboardInterrupt. Possibly this API could be improved to
    # be more ergonomic...

    @_public
    def current_kqueue(self) -> select.kqueue:
        """TODO: these are implemented, but are currently more of a sketch than
        anything real. See `#26
        <https://github.com/python-trio/trio/issues/26>`__.
        """
        return self._kqueue

    @contextmanager
    @_public
    def monitor_kevent(
        self,
        ident: int,
        filter: int,
    ) -> Iterator[_core.UnboundedQueue[select.kevent]]:
        """TODO: these are implemented, but are currently more of a sketch than
        anything real. See `#26
        <https://github.com/python-trio/trio/issues/26>`__.
        """
        key = (ident, filter)
        if key in self._registered:
            # Only one receiver per (ident, filter) pair at a time.
            raise _core.BusyResourceError(
                "attempt to register multiple listeners for same ident/filter pair",
            )
        q = _core.UnboundedQueue[select.kevent]()
        self._registered[key] = q
        try:
            yield q
        finally:
            # Unregister on exit, whether the with-block exited normally or not.
            del self._registered[key]

    @_public
    async def wait_kevent(
        self,
        ident: int,
        filter: int,
        abort_func: Callable[[BaseException], Abort],
    ) -> Abort:
        """TODO: these are implemented, but are currently more of a sketch than
        anything real. See `#26
        <https://github.com/python-trio/trio/issues/26>`__.
        """
        key = (ident, filter)
        if key in self._registered:
            raise _core.BusyResourceError(
                "attempt to register multiple listeners for same ident/filter pair",
            )
        self._registered[key] = _core.current_task()

        def abort(cancel_exc: BaseException) -> Abort:
            # Delegate the actual cancellation to the caller-supplied
            # abort_func; only drop our registration if it reports success.
            r = abort_func(cancel_exc)
            if r is _core.Abort.SUCCEEDED:  # TODO: test this branch
                del self._registered[key]
            return r

        # wait_task_rescheduled does not have its return type typed
        return await _core.wait_task_rescheduled(abort)  # type: ignore[no-any-return]

    async def _wait_common(
        self,
        fd: int | _HasFileNo,
        filter: int,
    ) -> None:
        """Shared implementation of wait_readable/wait_writable: register a
        oneshot kevent for *fd* with *filter* and block until it fires.
        """
        if not isinstance(fd, int):
            fd = fd.fileno()
        # Oneshot: the kernel deregisters the event after its first delivery.
        flags = select.KQ_EV_ADD | select.KQ_EV_ONESHOT
        event = select.kevent(fd, filter, flags)
        self._kqueue.control([event], 0)

        def abort(_: BaseException) -> Abort:
            # On cancellation, remove the kevent we added above.
            event = select.kevent(fd, filter, select.KQ_EV_DELETE)
            try:
                self._kqueue.control([event], 0)
            except OSError as exc:
                # kqueue tracks individual fds (*not* the underlying file
                # object, see _io_epoll.py for a long discussion of why this
                # distinction matters), and automatically deregisters an event
                # if the fd is closed. So if kqueue.control says that it
                # doesn't know about this event, then probably it's because
                # the fd was closed behind our backs. (Too bad we can't ask it
                # to wake us up when this happens, versus discovering it after
                # the fact... oh well, you can't have everything.)
                #
                # FreeBSD reports this using EBADF. macOS uses ENOENT.
                if exc.errno in (errno.EBADF, errno.ENOENT):  # pragma: no branch
                    pass
                else:  # pragma: no cover
                    # As far as we know, this branch can't happen.
                    raise
            return _core.Abort.SUCCEEDED

        await self.wait_kevent(fd, filter, abort)

    @_public
    async def wait_readable(self, fd: int | _HasFileNo) -> None:
        """Block until the kernel reports that the given object is readable.

        On Unix systems, ``fd`` must either be an integer file descriptor,
        or else an object with a ``.fileno()`` method which returns an
        integer file descriptor. Any kind of file descriptor can be passed,
        though the exact semantics will depend on your kernel. For example,
        this probably won't do anything useful for on-disk files.

        On Windows systems, ``fd`` must either be an integer ``SOCKET``
        handle, or else an object with a ``.fileno()`` method which returns
        an integer ``SOCKET`` handle. File descriptors aren't supported,
        and neither are handles that refer to anything besides a
        ``SOCKET``.

        :raises trio.BusyResourceError:
            if another task is already waiting for the given socket to
            become readable.
        :raises trio.ClosedResourceError:
            if another task calls :func:`notify_closing` while this
            function is still working.
        """
        await self._wait_common(fd, select.KQ_FILTER_READ)

    @_public
    async def wait_writable(self, fd: int | _HasFileNo) -> None:
        """Block until the kernel reports that the given object is writable.

        See `wait_readable` for the definition of ``fd``.

        :raises trio.BusyResourceError:
            if another task is already waiting for the given socket to
            become writable.
        :raises trio.ClosedResourceError:
            if another task calls :func:`notify_closing` while this
            function is still working.
        """
        await self._wait_common(fd, select.KQ_FILTER_WRITE)

    @_public
    def notify_closing(self, fd: int | _HasFileNo) -> None:
        """Notify waiters of the given object that it will be closed.

        Call this before closing a file descriptor (on Unix) or socket (on
        Windows). This will cause any `wait_readable` or `wait_writable`
        calls on the given object to immediately wake up and raise
        `~trio.ClosedResourceError`.

        This doesn't actually close the object – you still have to do that
        yourself afterwards. Also, you want to be careful to make sure no
        new tasks start waiting on the object in between when you call this
        and when it's actually closed. So to close something properly, you
        usually want to do these steps in order:

        1. Explicitly mark the object as closed, so that any new attempts
           to use it will abort before they start.
        2. Call `notify_closing` to wake up any already-existing users.
        3. Actually close the object.

        It's also possible to do them in a different order if that's more
        convenient, *but only if* you make sure not to have any checkpoints in
        between the steps. This way they all happen in a single atomic
        step, so other tasks won't be able to tell what order they happened
        in anyway.
        """
        if not isinstance(fd, int):
            fd = fd.fileno()
        # A waiter may exist for either the read or the write filter.
        for filter_ in [select.KQ_FILTER_READ, select.KQ_FILTER_WRITE]:
            key = (fd, filter_)
            receiver = self._registered.get(key)
            if receiver is None:
                continue
            if type(receiver) is _core.Task:
                # Remove the kernel registration, then wake the task with
                # ClosedResourceError and drop our bookkeeping entry.
                event = select.kevent(fd, filter_, select.KQ_EV_DELETE)
                self._kqueue.control([event], 0)
                exc = _core.ClosedResourceError("another task closed this fd")
                _core.reschedule(receiver, outcome.Error(exc))
                del self._registered[key]
            else:
                # XX this is an interesting example of a case where being able
                # to close a queue would be useful...
                raise NotImplementedError(
                    "can't close an fd that monitor_kevent is using",
                )