picamera.lens_shading_table = lst
```
"""
+
from __future__ import annotations
+ import gc
import logging
import time
from typing import List, Literal, Optional, Tuple
from scipy.ndimage import zoom

from picamera2 import Picamera2
+ import picamera2


def load_default_tuning(cam: Picamera2) -> dict:
@@ -245,31 +248,34 @@ def adjust_white_balance_from_raw(
    camera.configure(config)
    camera.start()
    channels = channels_from_bayer_array(camera.capture_array("raw"))
-     #logging.info(f"White balance: channels were retrieved with shape {channels.shape}.")
+     # logging.info(f"White balance: channels were retrieved with shape {channels.shape}.")
    if luminance is not None and Cr is not None and Cb is not None:
        # Reconstruct a low-resolution image from the lens shading tables
        # and use it to normalise the raw image, to compensate for
        # the brightest pixels in each channel not coinciding.
-         grids = grids_from_lst(np.array(luminance)**luminance_power, Cr, Cb)
-         channel_gains = 1 / grids
+         grids = grids_from_lst(np.array(luminance) ** luminance_power, Cr, Cb)
+         channel_gains = 1 / grids
        if channel_gains.shape[1:] != channels.shape[1:]:
            channel_gains = upsample_channels(channel_gains, channels.shape[1:])
        logging.info(f"Before gains, channel maxima are {np.max(channels, axis=(1,2))}")
        channels = channels * channel_gains
        logging.info(f"After gains, channel maxima are {np.max(channels, axis=(1,2))}")
    if method == "centre":
        _, h, w = channels.shape
-         blue, g1, g2, red = np.mean(
-             channels[:, 9 * h // 20:11 * h // 20, 9 * w // 20:11 * w // 20],
-             axis=(1,2),
-         ) - 64
+         blue, g1, g2, red = (
+             np.mean(
+                 channels[:, 9 * h // 20 : 11 * h // 20, 9 * w // 20 : 11 * w // 20],
+                 axis=(1, 2),
+             )
+             - 64
+         )
    else:
        # TODO: read black level from camera rather than hard-coding 64
        blue, g1, g2, red = np.percentile(channels, percentile, axis=(1, 2)) - 64
    green = (g1 + g2) / 2.0
    new_awb_gains = (green / red, green / blue)
    if Cr is not None and Cb is not None:
-         # The LST algorithm normalises Cr and Cb by their minimum.
+         # The LST algorithm normalises Cr and Cb by their minimum.
        # The lens shading correction only ever boosts the red and blue values.
        # Here, we decrease the gains by the minimum value of Cr and Cb.
        new_awb_gains = (green / red * np.min(Cr), green / blue * np.min(Cb))
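To make the arithmetic in this hunk easy to check, here is a small self-contained sketch of the same gain calculation. The channel means, the 64-count black level and the flat Cr/Cb tables below are made-up illustrative values, not output from the function.

```
import numpy as np

# Hypothetical centre-region means for the (blue, g1, g2, red) Bayer channels,
# after subtracting the 64-count black level that the code currently hard-codes.
blue, g1, g2, red = np.array([420.0, 700.0, 698.0, 510.0]) - 64
green = (g1 + g2) / 2.0

# AWB gains are expressed relative to green, as in adjust_white_balance_from_raw.
awb_gains = (green / red, green / blue)  # roughly (1.42, 1.78) for these made-up numbers

# When chrominance tables are available, the gains are scaled by their minima, because the
# lens shading correction only ever boosts red and blue.
Cr = np.full((12, 16), 0.9)
Cb = np.full((12, 16), 0.8)
awb_gains = (green / red * np.min(Cr), green / blue * np.min(Cb))
print(awb_gains)
```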
@@ -320,40 +326,47 @@ def get_16x12_grid(chan: np.ndarray, dx: int, dy: int):
    """
    for i in range(11):
        for j in range(15):
-             grid.append(np.mean(chan[dy * i : dy * (1 + i), dx * j : dx * (1 + j)]))
-         grid.append(np.mean(chan[dy * i : dy * (1 + i), 15 * dx :]))
+             grid.append(np.mean(chan[dy * i : dy * (1 + i), dx * j : dx * (1 + j)]))
+         grid.append(np.mean(chan[dy * i : dy * (1 + i), 15 * dx :]))
    for j in range(15):
-         grid.append(np.mean(chan[11 * dy :, dx * j : dx * (1 + j)]))
-     grid.append(np.mean(chan[11 * dy :, 15 * dx :]))
+         grid.append(np.mean(chan[11 * dy :, dx * j : dx * (1 + j)]))
+     grid.append(np.mean(chan[11 * dy :, 15 * dx :]))
    """
    return as np.array, ready for further manipulation
    """
    return np.reshape(np.array(grid), (12, 16))

+
def upsample_channels(grids: np.ndarray, shape: tuple[int]):
    """Zoom an image in the last two dimensions

    This is effectively the inverse operation of `get_16x12_grid`
    """
-     zoom_factors = [1,] + list(np.ceil(np.array(shape) / np.array(grids.shape[1:])))
-     return zoom(grids, zoom_factors, order=1)[:, :shape[0], :shape[1]]
+     zoom_factors = [
+         1,
+     ] + list(np.ceil(np.array(shape) / np.array(grids.shape[1:])))
+     return zoom(grids, zoom_factors, order=1)[:, : shape[0], : shape[1]]
+

def downsampled_channels(channels: np.ndarray, blacklevel=64) -> list[np.ndarray]:
    """Generate a downsampled, un-normalised image from which to calculate the LST

    TODO: blacklevel probably ought to be determined from the camera...
    """
    channel_shape = np.array(channels.shape[1:])
-     lst_shape = np.array([12,16])
-     step = np.ceil(channel_shape / lst_shape).astype(int)
+     lst_shape = np.array([12, 16])
+     step = np.ceil(channel_shape / lst_shape).astype(int)
    return np.stack(
        [
-             get_16x12_grid(channels[i, ...].astype(float) - blacklevel, step[1], step[0])
+             get_16x12_grid(
+                 channels[i, ...].astype(float) - blacklevel, step[1], step[0]
+             )
            for i in range(channels.shape[0])
        ],
        axis=0,
    )

+
def lst_from_channels(channels: np.ndarray) -> LensShadingTables:
    """Given the 4 Bayer colour channels from a white image, generate a LST.

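As a sanity check on the downsample/upsample pair above, the loop from the `get_16x12_grid` docstring can be run stand-alone and inverted with `scipy.ndimage.zoom`, mirroring `upsample_channels`. The helper name, the synthetic vignetted channel and its dimensions below are illustrative only.

```
import numpy as np
from scipy.ndimage import zoom


def reference_16x12_grid(chan: np.ndarray, dx: int, dy: int) -> np.ndarray:
    """Loop-based version of the averaging described in the docstring above."""
    grid = []
    for i in range(11):
        for j in range(15):
            grid.append(np.mean(chan[dy * i : dy * (1 + i), dx * j : dx * (1 + j)]))
        grid.append(np.mean(chan[dy * i : dy * (1 + i), 15 * dx :]))
    for j in range(15):
        grid.append(np.mean(chan[11 * dy :, dx * j : dx * (1 + j)]))
    grid.append(np.mean(chan[11 * dy :, 15 * dx :]))
    return np.reshape(np.array(grid), (12, 16))


# A synthetic single Bayer plane with radial fall-off, standing in for a white image.
h, w = 616, 820
y, x = np.mgrid[0:h, 0:w]
chan = 900.0 * (1 - 0.5 * (((x - w / 2) / w) ** 2 + ((y - h / 2) / h) ** 2))

step_y, step_x = int(np.ceil(h / 12)), int(np.ceil(w / 16))
grid = reference_16x12_grid(chan, step_x, step_y)  # (12, 16) summary of the channel

# Inverse step, as in upsample_channels: bilinear zoom back up, then crop to the original shape.
zoom_factors = np.ceil(np.array([h, w]) / np.array(grid.shape))
upsampled = zoom(grid, zoom_factors, order=1)[:h, :w]
```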
@@ -373,32 +386,34 @@ def lst_from_grids(grids: np.ndarray) -> LensShadingTables:
    # TODO: make consistent with
    https://git.linuxtv.org/libcamera.git/tree/utils/raspberrypi/ctt/ctt_alsc.py
    """
-     r: np.ndarray = grids[3, ...]
+     r: np.ndarray = grids[3, ...]
    g: np.ndarray = np.mean(grids[1:3, ...], axis=0)
    b: np.ndarray = grids[0, ...]

    # What we actually want to calculate is the gains needed to compensate for the
    # lens shading - that's 1/lens_shading_table_float as we currently have it.
    luminance_gains: np.ndarray = np.max(g) / g  # Minimum luminance gain is 1
    cr_gains: np.ndarray = g / r
-     #cr_gains /= cr_gains[5, 7] # Normalise so the central colour doesn't change
+     # cr_gains /= cr_gains[5, 7]  # Normalise so the central colour doesn't change
    cb_gains: np.ndarray = g / b
-     #cb_gains /= cb_gains[5, 7]
+     # cb_gains /= cb_gains[5, 7]
    return luminance_gains, cr_gains, cb_gains

+
def grids_from_lst(lum: np.ndarray, Cr: np.ndarray, Cb: np.ndarray) -> np.ndarray:
    """Convert from luminance/chrominance tables to four RGGB channels
-
+
    Note that these will be normalised - the maximum green value is always 1.
    Also, note that the channels are BGGR, to be consistent with the
    `channels_from_raw_image` function. This should probably change in the
    future.
    """
-     G = 1 / np.array(lum)
-     R = G / np.array(Cr)
-     B = G / np.array(Cb)
+     G = 1 / np.array(lum)
+     R = G / np.array(Cr)
+     B = G / np.array(Cb)
    return np.stack([B, G, G, R], axis=0)

+
def set_static_lst(
    tuning: dict,
    luminance: np.ndarray,
@@ -423,30 +438,23 @@ def set_static_lst(
    ]
    alsc["luminance_lut"] = np.reshape(luminance, (-1)).round(3).tolist()

- def set_static_ccm(
-     tuning: dict,
-     c: list
- ) -> None:
+
+ def set_static_ccm(tuning: dict, c: list) -> None:
    """Update the `rpi.ccm` section of a camera tuning dict to use a static correction.

    `tuning` will be updated in-place to use a fixed colour correction matrix, and to
    disable any adaptive tweaking by the algorithm.
    """
    ccm = Picamera2.find_tuning_algo(tuning, "rpi.ccm")
-     ccm["ccms"] = [{
-         "ct": 2860,
-         "ccm": c
-     }
-     ]
+     ccm["ccms"] = [{"ct": 2860, "ccm": c}]

- def get_static_ccm(
-     tuning: dict
- ) -> None:
-     """Get the `rpi.ccm` section of a camera tuning dict
-     """
+
+ def get_static_ccm(tuning: dict) -> None:
+     """Get the `rpi.ccm` section of a camera tuning dict"""
    ccm = Picamera2.find_tuning_algo(tuning, "rpi.ccm")
    return ccm["ccms"]

+
def lst_is_static(tuning: dict) -> bool:
    """Whether the lens shading table is set to static"""
    alsc = Picamera2.find_tuning_algo(tuning, "rpi.alsc")
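A brief usage sketch for the CCM helpers in this hunk. The tuning file name and the identity matrix are placeholders; `set_static_ccm` and `get_static_ccm` are the functions defined above.

```
from picamera2 import Picamera2

# Load a tuning dict to edit; "imx219.json" is a placeholder, use the file for your sensor.
tuning = Picamera2.load_tuning_file("imx219.json")

# The identity matrix, written as a flat list of nine coefficients, is only a stand-in here.
set_static_ccm(tuning, [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0])
print(get_static_ccm(tuning))  # [{'ct': 2860, 'ccm': [1.0, 0.0, ...]}]

# The edited tuning only takes effect when a new Picamera2 is constructed with it.
with Picamera2(tuning=tuning) as cam:
    cam.start()
```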
@@ -472,7 +480,7 @@ def set_static_geq(
def _geq_is_static(tuning: dict) -> bool:
    """Whether the green equalisation is set to static"""
    geq = Picamera2.find_tuning_algo(tuning, "rpi.geq")
-     return alsc["offset"] == 65535
+     return geq["offset"] == 65535


def index_of_algorithm(algorithms: list[dict], algorithm: str):
@@ -499,6 +507,7 @@ def lst_from_camera(camera: Picamera2) -> LensShadingTables:
    channels = raw_channels_from_camera(camera)
    return lst_from_channels(channels)

+
def raw_channels_from_camera(camera: Picamera2) -> LensShadingTables:
    """Acquire a raw image and return a 4xNxM array of the colour channels."""
    if camera.started:
@@ -521,6 +530,16 @@ def raw_channels_from_camera(camera: Picamera2) -> LensShadingTables:
    return channels_from_bayer_array(raw_image)


+ def recreate_camera_manager():
+     """Delete and recreate the camera manager.
+
+     This is necessary to ensure the tuning file is re-read.
+     """
+     del Picamera2._cm
+     gc.collect()
+     Picamera2._cm = picamera2.picamera2.CameraManager()
+
+
if __name__ == "__main__":
    """This block is untested but has been updated."""
    with Picamera2() as cam:
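Finally, a minimal sketch of how `recreate_camera_manager` might be used once a tuning dict has been edited. The tuning file name is a placeholder and the exact reopen sequence is an assumption rather than part of this commit.

```
from picamera2 import Picamera2

tuning = Picamera2.load_tuning_file("imx219.json")  # placeholder file name
# ... edit `tuning` with set_static_lst() / set_static_ccm() as above ...

recreate_camera_manager()  # drop the cached CameraManager so no stale state is reused

with Picamera2(tuning=tuning) as cam:
    cam.start()
    raw = cam.capture_array("raw")
```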