perf-debug
nyanpasu64 2023-11-28 11:49:15 -08:00
parent bf11cccfd3
commit 395e259f29
1 changed file with 13 additions and 4 deletions

@@ -460,16 +460,17 @@ class CorrScope:
                 shmem: SharedMemory
                 completion: "Future[None]"
 
-            # Same size as ProcessPoolExecutor, so threads won't starve if they all
-            # finish a job at the same time.
-            render_to_output: "Queue[RenderToOutput | None]" = Queue(nthread)
+            # Rely on avail_shmems for backpressure.
+            render_to_output: "Queue[RenderToOutput | None]" = Queue()
 
             # Release all shmems after finishing rendering.
             all_shmems: List[SharedMemory] = [
                 SharedMemory(create=True, size=framebuffer_nbyte)
-                for _ in range(nthread)
+                for _ in range(2 * nthread)
             ]
 
+            is_submitting = [False, 0]
+
             # Only send unused shmems to a worker process, and wait for it to be
             # returned before reusing.
             avail_shmems: "Queue[SharedMemory]" = Queue()
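
The hunk above removes the bound on render_to_output and lets the fixed pool of shared-memory buffers throttle the render loop instead: a frame can only be submitted while a free SharedMemory is sitting on avail_shmems, and buffers only return there once the output side has finished with them. Below is a minimal, self-contained sketch of that backpressure pattern, with a toy producer/consumer standing in for corrscope's real RenderToOutput and worker plumbing (NBUF, FRAME_NBYTE, jobs, and output_thread are illustrative names, not from the commit):

from multiprocessing.shared_memory import SharedMemory
from queue import Queue
from threading import Thread

NBUF = 4            # plays the role of 2 * nthread
FRAME_NBYTE = 1024  # stand-in for framebuffer_nbyte

# Fixed pool of shared-memory buffers; its size bounds the work in flight.
pool = [SharedMemory(create=True, size=FRAME_NBYTE) for _ in range(NBUF)]

# Unbounded job queue: backpressure comes from avail_shmems, not from here.
jobs: "Queue[tuple[int, SharedMemory] | None]" = Queue()

# Only buffers on this queue may be reused by the producer.
avail_shmems: "Queue[SharedMemory]" = Queue()
for shmem in pool:
    avail_shmems.put(shmem)

def output_thread() -> None:
    while True:
        job = jobs.get()
        if job is None:
            break
        frame, shmem = job
        _ = bytes(shmem.buf[:8])   # "consume" the frame...
        avail_shmems.put(shmem)    # ...then hand the buffer back for reuse

consumer = Thread(target=output_thread)
consumer.start()

for frame in range(20):
    # Blocks once all NBUF buffers are in flight: this is the backpressure.
    shmem = avail_shmems.get()
    shmem.buf[:8] = frame.to_bytes(8, "little")  # pretend to render a frame
    jobs.put((frame, shmem))

jobs.put(None)  # sentinel, like the Optional RenderToOutput in the diff
consumer.join()

for shmem in pool:
    shmem.close()
    shmem.unlink()

Doubling the pool to 2 * nthread costs nthread extra framebuffers of shared memory, presumably so a worker does not have to wait for the output thread to release its previous buffer before starting the next frame.
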
@@ -524,11 +525,16 @@ class CorrScope:
 
                 # blocks until frames get rendered and shmem is returned by
                 # output_thread().
+                t = time.perf_counter()
                 shmem = avail_shmems.get()
+                t = time.perf_counter() - t
+                if t >= 0.001:
+                    print("get shmem", t)
                 if is_aborted():
                     break
 
                 # blocking
+                t = time.perf_counter()
                 render_to_output.put(
                     RenderToOutput(
                         frame,
@@ -541,6 +547,9 @@ class CorrScope:
                         ),
                     )
                 )
+                t = time.perf_counter() - t
+                if t >= 0.001:
+                    print("send to render", t)
 
                 # TODO if is_aborted(), should we insert class CancellationToken,
                 # rather than having output_thread() poll it too?
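
The last two hunks are the perf-debug part: each potentially blocking queue call is bracketed with time.perf_counter(), and the elapsed time is printed whenever it reaches 1 ms, showing whether the render loop stalls waiting for a free shmem or waiting to hand work to the output thread. The same measurement written as a small reusable helper (a sketch only; blocked_time and its 1 ms default are my naming and choice, not part of the commit):

import time
from contextlib import contextmanager
from typing import Iterator

@contextmanager
def blocked_time(label: str, threshold: float = 0.001) -> Iterator[None]:
    """Print how long the wrapped block took, but only if it reached `threshold` seconds."""
    t = time.perf_counter()
    try:
        yield
    finally:
        dt = time.perf_counter() - t
        if dt >= threshold:
            print(label, dt)

# Usage, mirroring the inline instrumentation in the diff:
#
#     with blocked_time("get shmem"):
#         shmem = avail_shmems.get()
#
#     with blocked_time("send to render"):
#         render_to_output.put(...)

if __name__ == "__main__":
    with blocked_time("sleep"):
        time.sleep(0.01)  # well over 1 ms, so this prints "sleep 0.01..."
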