diff --git a/doc/conf.py b/doc/conf.py index 0853bb85e1..b27e4e72ed 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -94,9 +94,9 @@ # intersphinx configuration intersphinx_mapping = { "python": (f"https://docs.python.org/{sys.version_info.major}", None), - "numpy": ("https://docs.scipy.org/doc/numpy/", None), - "scipy": ("https://docs.scipy.org/doc/scipy/reference", None), - "matplotlib": ("https://matplotlib.org", None), + "numpy": ("https://numpy.org/doc/stable", None), + "scipy": ("https://docs.scipy.org/doc/scipy", None), + "matplotlib": ("https://matplotlib.org/stable", None), } sphinx_gallery_conf = { diff --git a/doc/guides/encoding/jpeg_2k.rst b/doc/guides/encoding/jpeg_2k.rst index e5ed1b7b7f..0fec655ce6 100644 --- a/doc/guides/encoding/jpeg_2k.rst +++ b/doc/guides/encoding/jpeg_2k.rst @@ -24,8 +24,8 @@ Valid Image Pixel Parameters ---------------------------- The table below lists the valid :dcm:`Image Pixel` -module parameters for *Pixel Data* encoded using *JPEG 2000 Lossless* or *JPEG 2000* -encoding. For an explanation of each parameter and its relationship +module parameters for *Pixel Data* encoded using the *JPEG 2000 Lossless* or *JPEG 2000* +transfer syntaxes. For an explanation of each parameter and its relationship with the *Pixel Data* see the :doc:`glossary of Image Pixel elements<../glossary>`. +------------+-----------------------+-----------------+----------------+------------+---------+ diff --git a/doc/guides/encoding/jpeg_ls.rst b/doc/guides/encoding/jpeg_ls.rst index 6ad05a8ef3..fc748dc489 100644 --- a/doc/guides/encoding/jpeg_ls.rst +++ b/doc/guides/encoding/jpeg_ls.rst @@ -12,8 +12,8 @@ Valid Image Pixel Parameters ---------------------------- The table below lists the valid :dcm:`Image Pixel` -module parameters for *Pixel Data* encoded using *JPEG-LS Lossless* or *JPEG-LS -Near-lossless* encoding. For an explanation of each parameter and its relationship +module parameters for *Pixel Data* encoded using the *JPEG-LS Lossless* or *JPEG-LS +Near-lossless* transfer syntaxes. For an explanation of each parameter and its relationship with the *Pixel Data* see the :doc:`glossary of Image Pixel elements<../glossary>`. +------------+-----------------------+-----------------+----------------+------------+---------+ @@ -100,7 +100,7 @@ pixel data should already be in the corresponding color space: * For *Bits Allocated* and *Bits Stored* less than or equal to 8: pixel data must be :func:`converted into YCbCr color space - `. However + `. However you should keep in mind that the conversion operation is lossy. * For *Bits Allocated* and *Bits Stored* between 9 and 16 (inclusive): pixel data should be downscaled to 8-bit (with *Bits Stored*, *Bits @@ -113,7 +113,7 @@ pixel data should already be in the corresponding color space: * For *Photometric Interpretation* ``RGB`` the pixel data must first be :func:`converted into RGB color space - `. However the conversion + `. However the conversion operation is lossy. * For *Photometric Interpretation* ``YBR_FULL`` nothing else is required. diff --git a/doc/guides/encoding/rle_lossless.rst b/doc/guides/encoding/rle_lossless.rst index 95cc727380..4cef9d4cc9 100644 --- a/doc/guides/encoding/rle_lossless.rst +++ b/doc/guides/encoding/rle_lossless.rst @@ -12,8 +12,8 @@ Valid Image Pixel Parameters ---------------------------- The table below lists the valid :dcm:`Image Pixel` -module parameters for *Pixel Data* encoded using *RLE Lossless* encoding. 
For -an explanation of each parameter and its relationship with the +module parameters for *Pixel Data* encoded using the *RLE Lossless* transfer +syntax. For an explanation of each parameter and its relationship with the *Pixel Data* see the :doc:`glossary of Image Pixel elements<../glossary>`. +------------+-----------------+-----------------+------------+---------+ @@ -55,7 +55,7 @@ corresponding color space: * For *Bits Allocated* and *Bits Stored* less than or equal to 8: pixel data must be :func:`converted into YCbCr color space - `. However + `. However you should keep in mind that the conversion operation is lossy. * For *Bits Allocated* and *Bits Stored* between 9 and 16 (inclusive): pixel data should be downscaled to 8-bit (with *Bits Stored*, *Bits @@ -68,7 +68,7 @@ corresponding color space: * For *Photometric Interpretation* ``RGB`` the pixel data must first be :func:`converted into RGB color space - `. However the conversion + `. However the conversion operation is lossy. * For *Photometric Interpretation* ``YBR_FULL`` nothing else is required. @@ -153,7 +153,7 @@ new *SOP Instance UID*: .. code-block:: python >>> from pydicom import examples - >>> from pydicom.pixel_data_handlers import convert_color_space + >>> from pydicom.pixels import convert_color_space >>> from pydicom.uid import generate_uid >>> ds = examples.rgb_color >>> rgb = ds.pixel_array diff --git a/doc/old/image_data_compression.rst b/doc/old/image_data_compression.rst index 2c5c81986b..cd60b4aaf9 100644 --- a/doc/old/image_data_compression.rst +++ b/doc/old/image_data_compression.rst @@ -121,7 +121,7 @@ Compressing using third-party packages If you need to perform pixel data compression using an encoding method not supported by *pydicom* - such as :dcm:`ISO/IEC 10918-1 JPEG - - then you'll need to find a third-party +` - then you'll need to find a third-party package or application to do so. Once you've done that you have to follow the requirements for compressed *Pixel Data* in the DICOM Standard: diff --git a/doc/old/working_with_overlays.rst b/doc/old/working_with_overlays.rst index d85a48bec9..4bf92ab8e2 100644 --- a/doc/old/working_with_overlays.rst +++ b/doc/old/working_with_overlays.rst @@ -68,7 +68,7 @@ overlay pixels start 1 row above and 1 row to the left of the image pixels. NumPy can be used to modify the pixels, but if the changes are to be saved, they must be bit-packed (using something like -:func:`~pydicom.pixel_data_handlers.numpy_handler.pack_bits`) and written +:func:`~pydicom.pixels.utils.pack_bits`) and written back to the correct element: .. code-block:: python @@ -77,7 +77,7 @@ back to the correct element: arr[10, :] = 1 # Pack the data - from pydicom.pixel_data_handlers.numpy_handler import pack_bits + from pydicom.pixels import pack_bits packed_bytes = pack_bits(arr) # Update the element value diff --git a/doc/old/working_with_pixel_data.rst b/doc/old/working_with_pixel_data.rst index aed6169d59..bb6187118b 100644 --- a/doc/old/working_with_pixel_data.rst +++ b/doc/old/working_with_pixel_data.rst @@ -127,7 +127,7 @@ of ``3`` then the returned pixel data will be in the color space as given by *pydicom* offers a limited ability to convert between 8-bits/channel YBR and RGB color spaces through the -:func:`~pydicom.pixel_data_handlers.util.convert_color_space` +:func:`~pydicom.pixels.processing.convert_color_space` function. When changing the color space you should also change the value of *Photometric Interpretation* to match. 
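As a minimal sketch of that advice (reusing the ``examples.rgb_color`` dataset that appears elsewhere in these docs purely for illustration), the conversion and the matching element update look like:

.. code-block:: python

    from pydicom import examples
    from pydicom.pixels import convert_color_space

    ds = examples.rgb_color
    # Convert the 8-bit/channel RGB frame to YBR_FULL
    ybr = convert_color_space(ds.pixel_array, "RGB", "YBR_FULL")
    # Keep (0028,0004) Photometric Interpretation in sync with the new color space
    ds.PhotometricInterpretation = "YBR_FULL"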
@@ -144,13 +144,13 @@ Some DICOM datasets store their output image pixel values in a lookup table (LUT), where the values in *Pixel Data* are the index to a corresponding LUT entry. When a dataset's (0028,0004) *Photometric Interpretation* value is ``PALETTE COLOR`` then the -:func:`~pydicom.pixel_data_handlers.util.apply_color_lut` function can be used +:func:`~pydicom.pixels.processing.apply_color_lut` function can be used to apply a palette color LUT to the pixel data to produce an RGB image. .. code-block:: python from pydicom import examples - from pydicom.pixel_data_handlers.util import apply_color_lut + from pydicom.pixels import apply_color_lut # Fetch an example PALETTE COLOR dataset ds = examples.palette_color @@ -165,7 +165,7 @@ of the pixel data is 8-bit. .. code-block:: python from pydicom import examples - from pydicom.pixel_data_handlers.util import apply_color_lut + from pydicom.pixels import apply_color_lut ds = examples.palette_color arr = ds.pixel_array @@ -186,7 +186,7 @@ Modality LUT or Rescale Operation The DICOM :dcm:`Modality LUT` module converts raw pixel data values to a specific (possibly unitless) physical quantity, such as Hounsfield units for CT. The -:func:`~pydicom.pixel_data_handlers.util.apply_modality_lut` function can be +:func:`~pydicom.pixels.processing.apply_modality_lut` function can be used with an input array of raw values and a dataset containing a Modality LUT module to return the converted values. When a dataset requires multiple grayscale transformations, the Modality LUT transformation is always applied @@ -195,7 +195,7 @@ first. .. code-block:: python from pydicom import examples - from pydicom.pixel_data_handlers.util import apply_modality_lut + from pydicom.pixels import apply_modality_lut ds = examples.ct arr = ds.pixel_array @@ -207,7 +207,7 @@ VOI LUT or Windowing Operation The DICOM :dcm:`VOI LUT` module applies a VOI or windowing operation to input values. The -:func:`~pydicom.pixel_data_handlers.util.apply_voi_lut` function +:func:`~pydicom.pixels.processing.apply_voi_lut` function can be used with an input array and a dataset containing a VOI LUT module to return values with applied VOI LUT or windowing. When a dataset contains multiple VOI or windowing views then a particular view can be returned by @@ -219,7 +219,7 @@ that the modality LUT or rescale operation has already been applied. .. code-block:: python from pydicom import examples - from pydicom.pixel_data_handlers.util import apply_voi_lut + from pydicom.pixels import apply_voi_lut ds = examples.overlay arr = ds.pixel_array diff --git a/doc/reference/encaps.rst b/doc/reference/encaps.rst index 4cd8c223ab..9c822a6bcc 100644 --- a/doc/reference/encaps.rst +++ b/doc/reference/encaps.rst @@ -3,6 +3,7 @@ Bulk Data Encapsulation Utilities (:mod:`pydicom.encaps`) ========================================================= +.. module:: pydicom.encaps .. currentmodule:: pydicom.encaps Functions for parsing and applying encapsulation to bulk data elements such diff --git a/doc/reference/handlers.pixel_data.rst b/doc/reference/handlers.pixel_data.rst index e9c28d2b5f..15ad792672 100644 --- a/doc/reference/handlers.pixel_data.rst +++ b/doc/reference/handlers.pixel_data.rst @@ -3,6 +3,11 @@ Pixel Data Handlers (:mod:`pydicom.pixel_data_handlers`) ======================================================== +.. deprecated:: 3.0 + + The ``pydicom.pixel_data_handlers`` module is deprecated and will be removed + in v4.0. Use the :mod:`~pydicom.pixels` module instead. + .. 
currentmodule:: pydicom.pixel_data_handlers .. autosummary:: diff --git a/doc/reference/handlers.rst b/doc/reference/handlers.rst index d6813afea5..2bc5ed66f3 100644 --- a/doc/reference/handlers.rst +++ b/doc/reference/handlers.rst @@ -3,6 +3,11 @@ Bulk Data Handlers ================== +.. deprecated:: 3.0 + + The ``pydicom.pixel_data_handlers`` module is deprecated and will be removed + in v4.0. Use the :mod:`~pydicom.pixels` module instead. + Functions for handling bulk data elements such as (7FE0,0010) *Pixel Data* .. toctree:: @@ -10,22 +15,3 @@ Functions for handling bulk data elements such as (7FE0,0010) *Pixel Data* :includehidden: handlers.pixel_data - -Pixel Data Utilities -==================== - -Functions for manipulating (7FE0,0010) *Pixel Data*. - -.. currentmodule:: pydicom.pixel_data_handlers - -.. autosummary:: - :toctree: generated/ - - apply_color_lut - apply_modality_lut - apply_rescale - apply_windowing - apply_voi - apply_voi_lut - convert_color_space - util diff --git a/doc/reference/pixels.decoders.rst b/doc/reference/pixels.decoders.rst index b405a33527..86ebc569e1 100644 --- a/doc/reference/pixels.decoders.rst +++ b/doc/reference/pixels.decoders.rst @@ -3,11 +3,13 @@ Pixel Data Decoders (:mod:`pydicom.pixels.decoders`) ==================================================== +.. module:: pydicom.pixels.decoders + :class:`~pydicom.pixels.decoders.base.Decoder` class instances for decoding native and compressed (7FE0,0010) *Pixel Data*, (7FE0,0008) *Float Pixel Data* and -(7FE0,0009) *Double Float Pixel Data* +(7FE0,0009) *Double Float Pixel Data*. -.. currentmodule:: pydicom.pixels.decoders.base +.. currentmodule:: pydicom.pixels.decoders .. autosummary:: :toctree: generated/ @@ -32,6 +34,8 @@ native and compressed (7FE0,0010) *Pixel Data*, (7FE0,0008) *Float Pixel Data* a Base decoder classes used by all decoders +.. currentmodule:: pydicom.pixels.decoders.base + .. autosummary:: :toctree: generated/ diff --git a/doc/reference/pixels.encoders.rst b/doc/reference/pixels.encoders.rst index f7681c5818..7a5ef9e140 100644 --- a/doc/reference/pixels.encoders.rst +++ b/doc/reference/pixels.encoders.rst @@ -3,10 +3,12 @@ Pixel Data Encoders (:mod:`pydicom.pixels.encoders`) ==================================================== +.. module:: pydicom.pixels.encoders + :class:`~pydicom.pixels.encoders.base.Encoder` class instances for compressing (7FE0,0010) *Pixel Data*. -.. currentmodule:: pydicom.pixels.encoders.base +.. currentmodule:: pydicom.pixels.encoders .. autosummary:: :toctree: generated/ diff --git a/doc/reference/pixels.processing.rst b/doc/reference/pixels.processing.rst new file mode 100644 index 0000000000..a5411a1b85 --- /dev/null +++ b/doc/reference/pixels.processing.rst @@ -0,0 +1,20 @@ +.. _api_pixel_processing: + +Pixel Data Processing (:mod:`pydicom.pixels.processing`) +======================================================== + +Functions for applying image processing to pixel data. + +.. module:: pydicom.pixels.processing +.. currentmodule:: pydicom.pixels.processing + +.. 
autosummary:: + :toctree: generated/ + + apply_color_lut + apply_modality_lut + apply_rescale + apply_voi_lut + apply_voi + apply_windowing + convert_color_space diff --git a/doc/reference/pixels.rst b/doc/reference/pixels.rst index cb4b14d031..41e1474446 100644 --- a/doc/reference/pixels.rst +++ b/doc/reference/pixels.rst @@ -3,43 +3,45 @@ Pixel Data (:mod:`pydicom.pixels`) ================================== -Functions for handling bulk data elements such as(7FE0,0010) *Pixel Data*, -(7FE0,0008) *Float Pixel Data* and (7FE0,0009) *Double Float Pixel Data* - -Pixel Data Decoding -------------------- - +.. module:: pydicom.pixels .. currentmodule:: pydicom.pixels -.. toctree:: - :maxdepth: 1 - :includehidden: - - pixels.decoders - -Pixel Data Encoding -------------------- +Image processing functions -.. currentmodule:: pydicom.pixels - -.. toctree:: - :maxdepth: 1 - :includehidden: - - pixels.encoders +.. autosummary:: + :toctree: generated/ + apply_color_lut + apply_modality_lut + apply_voi_lut + apply_voi + apply_windowing + convert_color_space -Pixel Data Utilities --------------------- -.. currentmodule:: pydicom.pixels +Utility functions .. autosummary:: :toctree: generated/ + as_pixel_options get_decoder get_encoder - pixel_array iter_pixels - as_pixel_options + pack_bits + pixel_array + unpack_bits + + +Sub-modules +----------- + +.. toctree:: + :maxdepth: 1 + :includehidden: + + pixels.decoders + pixels.encoders + pixels.processing + pixels.utils diff --git a/doc/reference/pixels.utils.rst b/doc/reference/pixels.utils.rst new file mode 100644 index 0000000000..38024f45e6 --- /dev/null +++ b/doc/reference/pixels.utils.rst @@ -0,0 +1,25 @@ +.. _api_pixel_utils: + +Pixel Data Utilities (:mod:`pydicom.pixels.utils`) +================================================== + +.. module:: pydicom.pixels.utils +.. currentmodule:: pydicom.pixels.utils + +Pixel data related utility functions. + +.. autosummary:: + :toctree: generated/ + + as_pixel_options + expand_ybr422 + get_expected_length + get_image_pixel_ids + get_j2k_parameters + get_nr_frames + iter_pixels + pack_bits + pixel_array + pixel_dtype + reshape_pixel_array + unpack_bits diff --git a/doc/reference/uid.rst b/doc/reference/uid.rst index f251ceb562..b6c7ca4225 100644 --- a/doc/reference/uid.rst +++ b/doc/reference/uid.rst @@ -3,6 +3,7 @@ UID Definitions and Utilities (:mod:`pydicom.uid`) ================================================== +.. module:: pydicom.uid .. currentmodule:: pydicom.uid diff --git a/doc/release_notes/v3.0.0.rst b/doc/release_notes/v3.0.0.rst index b69464791e..f8a79bd311 100644 --- a/doc/release_notes/v3.0.0.rst +++ b/doc/release_notes/v3.0.0.rst @@ -3,65 +3,67 @@ Version 3.0.0 Changes ------- -* Removed support for Python <= 3.9 -* All tag formats changed to upper case, no space e.g. "(7FE0,0010)" rather than "(7fe0, 0010)" +* Removed support for Python <= 3.9. +* All tag formats changed to upper case, no space e.g. "(7FE0,0010)" rather than "(7fe0, 0010)". * Values with VR **AE** with an incorrect value length are now handled - gracefully (extra bytes are ignored with a warning) -* A value of 0 for ``NumberOfFrames`` is now handled as 1 frame, with a user warning issued - on reading the pixel data (:issue:`1844`) + gracefully (extra bytes are ignored with a warning). +* A value of 0 for *Number of Frames* is now handled as 1 frame, with a user warning issued + on reading the pixel data (:issue:`1844`). 
* The value for :attr:`~pydicom.uid.JPEGLossless` has changed from 1.2.840.10008.1.2.4.70 to 1.2.840.10008.1.2.4.57 to match its UID keyword. Use - :attr:`~pydicom.uid.JPEGLosslessSV1` instead for 1.2.840.10008.1.2.4.70 + :attr:`~pydicom.uid.JPEGLosslessSV1` instead for 1.2.840.10008.1.2.4.70. * The theoretical maximum number of instances supported by :class:`~pydicom.fileset.FileSet` has been reduced to 1838265625 to ensure support - for 32-bit systems (:issue:`1743`) + for 32-bit systems (:issue:`1743`). * The characters used by :func:`~pydicom.fileset.generate_filename` when - `alphanumeric` is ``True`` has been reduced to [0-9][A-I,K-Z] + `alphanumeric` is ``True`` has been reduced to [0-9][A-I,K-Z]. * :func:`~pydicom.data.get_testdata_file` and :func:`~pydicom.data.get_testdata_files` - now raise ``ValueError`` if called with an absolute path or pattern + now raise ``ValueError`` if called with an absolute path or pattern. * :func:`~pydicom.uid.generate_uid` has been changed to use a random suffix generated using :func:`~secrets.randbelow` when `entropy_srcs` isn't used, and the maximum allowed length of the `prefix` has been changed to 54 characters - (:issue:`1773`) -* :attr:`~pydicom.dataelem.DataElement.VM` always returns ``1`` for **SQ** - elements (:issue:`1481`) -* DICOM dictionary updated to 2023d + (:issue:`1773`). +* :attr:`DataElement.VM` always returns ``1`` + for **SQ** elements (:issue:`1481`). +* DICOM dictionary updated to 2023d. * :func:`~pydicom.dataset.validate_file_meta` now checks to ensure required - Type 1 elements aren't empty + Type 1 elements aren't empty. * `implicit_vr` and `little_endian` optional arguments added to - :meth:`~pydicom.dataset.Dataset.save_as`. In addition, this method will now - raise an exception if the user tries to convert between little and big endian - datasets. If this is something you need, use :func:`~pydicom.filewriter.dcmwrite` instead. + :meth:`Dataset.save_as()`. In addition, this + method will now raise an exception if the user tries to convert between little + and big endian datasets. If this is something you need, use + :func:`~pydicom.filewriter.dcmwrite` instead. * `implicit_vr`, `little_endian` and `force_encoding` optional arguments - added to :func:`~pydicom.filewriter.dcmwrite` + added to :func:`~pydicom.filewriter.dcmwrite`. * The priority used to decide which encoding to use with - :meth:`~pydicom.dataset.Dataset.save_as` and + :meth:`Dataset.save_as()` and :func:`~pydicom.filewriter.dcmwrite` has been changed to: - 1. The set *Transfer Syntax UID* - 2. The `implicit_vr` and `little_endian` arguments + 1. The set *Transfer Syntax UID*, + 2. The `implicit_vr` and `little_endian` arguments, 3. :attr:`Dataset.is_implicit_VR` and - :attr:`Dataset.is_little_endian` - 4. :attr:`Dataset.original_encoding` + :attr:`Dataset.is_little_endian`, + 4. :attr:`Dataset.original_encoding`. * Datasets containing *Command Set* (0000,eeee) elements can no longer be written using - :meth:`~pydicom.dataset.Dataset.save_as` or :func:`~pydicom.filewriter.dcmwrite`, - use :func:`~pydicom.filewriter.write_dataset` instead. -* The :attr:`~pydicom.dataset.FileDataset.file_meta` elements are no longer - modified when writing + :meth:`Dataset.save_as()` or + :func:`~pydicom.filewriter.dcmwrite`, use :func:`~pydicom.filewriter.write_dataset` + instead. +* A dataset's :attr:`~pydicom.dataset.FileDataset.file_meta` elements are no longer + modified when writing. 
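Tying the encoding changes above together, here is a minimal sketch of selecting the output encoding explicitly via the new keyword arguments (the dataset contents and output filename are assumed; since no *Transfer Syntax UID* is set, the keyword arguments decide the encoding):

.. code-block:: python

    from pydicom import Dataset
    from pydicom.filewriter import dcmwrite

    ds = Dataset()
    ds.PatientName = "CITIZEN^Jan"
    # No Transfer Syntax UID is available (priority 1), so `implicit_vr`
    # and `little_endian` (priority 2) determine the output encoding
    dcmwrite("minimal.dcm", ds, implicit_vr=True, little_endian=True)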
* :class:`~pydicom.filebase.DicomIO` now requires a readable or writeable buffer during initialisation and :class:`~pydicom.filebase.DicomBytesIO` directly inherits from it. -* The ``pydicom.encoders`` module has been moved to :doc:`pydicom.pixels.encoders`, - the original import path will be removed in v4.0 +* The ``pydicom.encoders`` module has been moved to :mod:`pydicom.pixels.encoders + `, the original import path will be removed in v4.0. * Using GDCM v3.0.23 or lower to decode JPEG-LS datasets with a *Bits Stored* of 6 or 7 produces incorrect results, so attempting to do so now raises an exception. - ``pyjpegls`` or ``pylibjpeg`` with ``pylibjpeg-libjpeg`` can be used instead (:issue:`2008`) + ``pyjpegls`` or ``pylibjpeg`` with ``pylibjpeg-libjpeg`` can be used instead (:issue:`2008`). Removals ~~~~~~~~ -* ``compat`` module removed +* The ``compat`` module has been removed. * The ``dicomdir`` module and ``DicomDir`` class have been removed and reading a DICOMDIR dataset now returns a normal :class:`~pydicom.dataset.FileDataset` instance. For handling DICOM File-sets and DICOMDIR datasets use the @@ -91,30 +93,30 @@ Removals :attr:`~pydicom.uid.UncompressedTransferSyntaxes` * ``PILSupportedCompressedPixelTransferSyntaxes`` * The ``PersonNameUnicode`` class has been removed, use - :class:`~pydicom.valuerep.PersonName` instead + :class:`~pydicom.valuerep.PersonName` instead. * The ``DataElement.description`` attribute has been removed, use - :attr:`DataElement.name` instead + :attr:`DataElement.name` instead. * The ``pixel_data_handlers.rle_handler.rle_encode_frame`` function has been - removed, use :meth:`~pydicom.dataset.Dataset.compress` or + removed, use :meth:`Dataset.compress()` or :attr:`~pydicom.pixels.encoders.base.RLELosslessEncoder` instead. -* Removed the ``_storage_sopclass_uids`` module, import UIDs from the - :doc:`uid` module instead +* The ``_storage_sopclass_uids`` module has been removed, import UIDs from the + :mod:`~pydicom.uid` module instead. * The following properties have been removed: * ``Dataset.parent`` and ``Dataset.parent_seq`` * ``Sequence.parent`` and ``Sequence.parent_dataset`` * ``DataElement.parent`` * The ``overlay_data_handlers`` module has been removed, use the :mod:`~pydicom.overlays` - module instead -* ``config.overlay_data_handlers`` has been removed -* Removed ``Dataset.fix_meta_info()``, encoding state now follows the transfer syntax - instead of the other way around + module instead. +* ``config.overlay_data_handlers`` has been removed. +* ``Dataset.fix_meta_info()`` has been removed as encoding state now follows the + transfer syntax instead of the other way around. Enhancements ------------ * Added details of missing required tag information when adding a dataset to a - File-set (:issue:`1752`) + File-set (:issue:`1752`). * The following UID constants have been added: * :attr:`~pydicom.uid.MPEG2MPMLF` @@ -132,27 +134,27 @@ Enhancements * :attr:`~pydicom.uid.SMPTEST211020UncompressedProgressiveActiveVideo` * :attr:`~pydicom.uid.SMPTEST211020UncompressedInterlacedActiveVideo` * :attr:`~pydicom.uid.SMPTEST211030PCMDigitalAudio` -* Added convenience method :meth:`~pydicom.dataset.Dataset.add_new_private` to add a private tag +* Added convenience method :meth:`~pydicom.dataset.Dataset.add_new_private` to add a private tag. * Added the :ref:`examples` module to make it easier and less - confusing for users to work with the example datasets used by the documentation + confusing for users to work with the example datasets used by the documentation. 
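For instance, a dataset used throughout the documentation can now be fetched directly (a minimal sketch; ``examples.ct`` is one of the datasets referenced elsewhere in these notes):

.. code-block:: python

    from pydicom import examples

    # Example datasets are exposed as attributes of the module
    ds = examples.ct
    print(ds.PatientName)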
* Added the ability to set the corresponding dataset encoding for private transfer syntaxes to :class:`~pydicom.uid.UID` via the :meth:`~pydicom.uid.UID.set_private_encoding` - method + method. * Added the ability to register private transfer syntaxes with :func:`~pydicom.uid.register_transfer_syntax` so they can be used when reading - datasets with :func:`~pydicom.filereader.dcmread` -* Warning messages are also sent to the pydicom logger (:issue:`1529`) + datasets with :func:`~pydicom.filereader.dcmread`. +* Warning messages are also sent to the pydicom logger (:issue:`1529`). * Added the following to the :mod:`~pydicom.encaps` module: - * :func:`~pydicom.encaps.parse_basic_offsets` for parsing the Basic Offset Table + * :func:`~pydicom.encaps.parse_basic_offsets` for parsing the Basic Offset Table. * :func:`~pydicom.encaps.parse_fragments` for determining the number of encapsulated - fragments and their byte offsets - * :func:`~pydicom.encaps.generate_fragments` for yielding encapsulated fragments + fragments and their byte offsets. + * :func:`~pydicom.encaps.generate_fragments` for yielding encapsulated fragments. * :func:`~pydicom.encaps.generate_fragmented_frames` for yielding encapsulated frame - fragments - * :func:`~pydicom.encaps.generate_frames` for yielding whole encapsulated frames + fragments. + * :func:`~pydicom.encaps.generate_frames` for yielding whole encapsulated frames. * :func:`~pydicom.encaps.get_frame` for returning the specific encapsulated frame at `index` - without necessarily having to read the preceding frames into memory + without necessarily having to read the preceding frames into memory. These new functions support reading encapsulated data from both :class:`bytes` or any Python object with ``read()``, ``seek()`` and ``tell()`` methods such @@ -161,32 +163,32 @@ Enhancements ` for determining frame boundaries. * Added the `keep_deferred` keyword argument to :meth:`Dataset.get_item() ` to allow accessing the file offset and - element length without having to read the element value. (:issue:`1873`) -* Added the :doc:`pixels` module and a new more flexible backend for + element length without having to read the element value. (:issue:`1873`). +* Added the :mod:`~pydicom.pixels` module and a new more flexible backend for decoding pixel data via :class:`~pydicom.pixels.decoders.base.Decoder` factory class instances. The new decoding backend adds support for the following: - * Returning a view over the original pixel data buffer (:issue:`746`) - * Retrieving specific frames (:issue:`1263`, :issue:`1243`) + * Returning a view over the original pixel data buffer (:issue:`746`). + * Retrieving specific frames (:issue:`1263`, :issue:`1243`). * Returning RGB pixel data by default for JPEG (:issue:`1781`, :issue:`1133` - and many others) + and many others). * Returning excess frames for JPEG when there is no Basic or Extended Offset - Table and the *Number of Frames* is incorrect (:issue:`1666`) + Table and the *Number of Frames* is incorrect (:issue:`1666`). * Returning the decoded pixel data as either a NumPy :class:`~numpy.ndarray` or - `buffer-like object `_ - * Iterating through either all or specific frames + `buffer-like object `_. + * Iterating through either all or specific frames. -* Added support for decoding HTJ2K transfer syntaxes (:issue:`1848`) +* Added support for decoding HTJ2K transfer syntaxes (:issue:`1848`). 
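As a sketch of the random-access workflow the new :mod:`~pydicom.encaps` functions enable (the file name is a placeholder, and the exact keyword arguments shown are an assumption based on the descriptions above):

.. code-block:: python

    from pydicom import dcmread
    from pydicom.encaps import get_frame

    # "multiframe.dcm" stands in for any dataset whose Pixel Data is
    # encapsulated (i.e. uses a compressed transfer syntax)
    ds = dcmread("multiframe.dcm")
    # Return the encapsulated bytes of frame 10 without reading frames 0-9
    frame = get_frame(ds.PixelData, 10, number_of_frames=int(ds.NumberOfFrames))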
* Added two functions for returning pixel data as a NumPy :class:`~numpy.ndarray` from a path to a dataset while minimizing memory-usage: :func:`~pydicom.pixels.pixel_array` and :func:`~pydicom.pixels.iter_pixels`. * Added support for the following transfer syntaxes to :meth:`Dataset.compress() ` (:issue:`1997`): - * *JPEG-LS Lossless* - * *JPEG-LS Near Lossless* - * *JPEG 2000 Lossless* - * *JPEG 2000* + * *JPEG-LS Lossless* with :attr:`~pydicom.pixels.encoders.base.JPEGLSLosslessEncoder` + * *JPEG-LS Near Lossless* with :attr:`~pydicom.pixels.encoders.base.JPEGLSNearLosslessEncoder` + * *JPEG 2000 Lossless* with :attr:`~pydicom.pixels.encoders.base.JPEG2000LosslessEncoder` + * *JPEG 2000* with :attr:`~pydicom.pixels.encoders.base.JPEG2000Encoder` See the :doc:`JPEG-LS` and :doc:`JPEG 2000 ` encoding guides for more information. @@ -196,69 +198,100 @@ Fixes ----- * Fixed the GDCM and pylibjpeg handlers changing the *Pixel Representation* value to 0 when the J2K stream disagrees with the dataset and - :attr:`~pydicom.config.APPLY_J2K_CORRECTIONS` is ``True`` (:issue:`1689`) -* Fixed pydicom codify error when relative path did not exist -* Fixed the VR enum sometimes returning invalid values for Python 3.11+ (:issue:`1874`) -* Fixed pixel data handler for Pillow 10.1 raising an AttributeError (:issue:`1907`) + :attr:`~pydicom.config.APPLY_J2K_CORRECTIONS` is ``True`` (:issue:`1689`). +* Fixed pydicom codify error when relative path did not exist. +* Fixed the VR enum sometimes returning invalid values for Python 3.11+ (:issue:`1874`). +* Fixed pixel data handler for Pillow 10.1 raising an AttributeError (:issue:`1907`). * Fixed a possible security issue with :class:`~pydicom.fileset.FileInstance` instances being able to escape the temporary directory when being added to a - :class:`~pydicom.fileset.FileSet` (:issue:`1922`) + :class:`~pydicom.fileset.FileSet` (:issue:`1922`). * Fixed an ``AttributeError`` when running :py:func:`~copy.deepcopy` after - :meth:`Dataset.update` (:issue:`1816`) + :meth:`Dataset.update` (:issue:`1816`). * Fixed :func:`~pydicom.encaps.encapsulate_extended` not returning the correct - values for odd-length frames (:issue:`1968`) + values for odd-length frames (:issue:`1968`). * Fixed using the incorrect encoding when writing datasets converted between - explicit and implicit VR when only the *Transfer Syntax UID* was changed (:issue:`1943`) + explicit and implicit VR when only the *Transfer Syntax UID* was changed (:issue:`1943`). * Fixed the ``jpeg_ls``, ``pillow`` and ``rle`` pixel data handlers not working - correctly when a frame is spread across multiple fragments (:issue:`1774`) + correctly when a frame is spread across multiple fragments (:issue:`1774`). * Added mitigation for a rare case where clearing the pixel data value prior to updating it may sometimes result in :attr:`~pydicom.dataset.Dataset.pixel_array` - returning the previous array instead of creating a new one (:issue:`1983`) + returning the previous array instead of creating a new one (:issue:`1983`). * Fixed a ``KeyError`` when comparing codes with one of the codes having ``scheme_designator`` set to ``SRT`` but not being included in the ``SRT`` - to ``SCT`` code mapping (:issue:`1994`) + to ``SCT`` code mapping (:issue:`1994`). * Fixed JPEG-LS datasets with a *Pixel Representation* of 1 returning incorrect - image data when *Bits Stored* is less than *Bits Allocated* (:issue:`2009`) + image data when *Bits Stored* is less than *Bits Allocated* (:issue:`2009`). 
* Fixed decoding failures for JPEG-LS datasets with *Bits Allocated* of 16 and - *Bits Stored* <= 8 (:issue:`2010`) -* Fixed the VR not being set correctly with :func:`~pydicom.dataset.Dataset.compress` - (:issue:`2013`) + *Bits Stored* <= 8 (:issue:`2010`). +* Fixed the *Pixel Data* VR not being set correctly with :func:`Dataset.compress() + ` (:issue:`2013`). Deprecations ------------ * :attr:`Dataset.is_little_endian ` and - :attr:`Dataset.is_implicit_VR` will be removed in v4.0 + :attr:`Dataset.is_implicit_VR` will be removed in v4.0. * :attr:`Dataset.read_little_endian` and :attr:`Dataset.read_implicit_vr` will be removed in v4.0, - use :attr:`Dataset.original_encoding` instead + use :attr:`Dataset.original_encoding` instead. * :attr:`Dataset.read_encoding` will be removed in v4.0, - use :attr:`Dataset.original_character_set` instead + use :attr:`Dataset.original_character_set` instead. * The `write_like_original` optional argument to :meth:`Dataset.save_as` and :func:`~pydicom.filewriter.dcmwrite` will be removed in v4.0, use - `enforce_file_format` instead + `enforce_file_format` instead. * The following :mod:`~pydicom.encaps` module functions will be removed in v4.0: * :func:`~pydicom.encaps.get_frame_offsets`, use :func:`~pydicom.encaps.parse_basic_offsets` - instead + instead. * :func:`~pydicom.encaps.generate_pixel_data_fragment`, use :func:`~pydicom.encaps.generate_fragments` - instead + instead. * :func:`~pydicom.encaps.generate_pixel_data_frame`, use :func:`~pydicom.encaps.generate_fragmented_frames` - instead + instead. * :func:`~pydicom.encaps.generate_pixel_data`, use :func:`~pydicom.encaps.generate_frames` - instead + instead. * :func:`~pydicom.encaps.decode_data_sequence`, use :func:`~pydicom.encaps.generate_fragments` - instead + instead. * :func:`~pydicom.encaps.defragment_data`, use :func:`~pydicom.encaps.generate_frames` - instead + instead. * :func:`~pydicom.encaps.read_item`, use :func:`~pydicom.encaps.generate_fragments` - instead + instead. + +* The :mod:`pydicom.pixel_data_handlers` module will be removed in v4.0. All pixel + data processing will use the :mod:`pydicom.pixels` module instead starting + with v3.0. + + * The following functions from :mod:`pydicom.pixel_data_handlers.util` have been + moved to :mod:`pydicom.pixels.processing`: + + * :func:`~pydicom.pixels.processing.apply_color_lut` + * :func:`~pydicom.pixels.processing.apply_modality_lut` + * :func:`~pydicom.pixels.processing.apply_rescale` + * :func:`~pydicom.pixels.processing.apply_voi_lut` + * :func:`~pydicom.pixels.processing.apply_voi` + * :func:`~pydicom.pixels.processing.apply_windowing` + * :func:`~pydicom.pixels.processing.convert_color_space` + + * The following functions from :mod:`pydicom.pixel_data_handlers.util` have been + moved to :mod:`pydicom.pixels.utils`: + + * :func:`~pydicom.pixels.utils.expand_ybr422` + * :func:`~pydicom.pixels.utils.get_expected_length` + * :func:`~pydicom.pixels.utils.get_image_pixel_ids` + * :func:`~pydicom.pixels.utils.get_j2k_parameters` + * :func:`~pydicom.pixels.utils.get_nr_frames` + * :func:`~pydicom.pixels.utils.pack_bits` + * :func:`~pydicom.pixels.utils.pixel_dtype` + * :func:`~pydicom.pixels.utils.reshape_pixel_array` + * :func:`~pydicom.pixels.utils.unpack_bits` + + * :func:`~pydicom.pixel_data_handlers.util.dtype_corrected_for_endianness` will be + removed in v4.0. 
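Putting the deprecations above into practice, a typical migration is just an import change (a sketch; the specific functions are drawn from the lists above):

.. code-block:: python

    # Deprecated in v3.0 (still works, but emits a DeprecationWarning):
    # from pydicom.pixel_data_handlers.util import apply_voi_lut, pack_bits

    # Preferred locations going forward:
    from pydicom.pixels import apply_voi_lut, pack_bits        # top-level re-exports
    from pydicom.pixels.processing import apply_modality_lut   # processing functions
    from pydicom.pixels.utils import get_nr_frames             # utility functions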
Pydicom Internals ----------------- -* Repository folder structure refactored -* Renamed top level ``source`` folder to ``util`` -* New CI tools - `dependabot`, and `pre-commit` using black and ruff -* Added a script to hotfix the documentation search function (:issue:`1965`) +* Repository folder structure refactored. +* Renamed top level ``source`` folder to ``util``. +* New CI tools - `dependabot`, and `pre-commit` using black and ruff. +* Added a script to hotfix the documentation search function (:issue:`1965`). diff --git a/src/pydicom/dataset.py b/src/pydicom/dataset.py index a2f8c0432a..8e9d4301ee 100644 --- a/src/pydicom/dataset.py +++ b/src/pydicom/dataset.py @@ -67,8 +67,8 @@ from pydicom.filebase import ReadableBuffer, WriteableBuffer from pydicom.fileutil import path_from_pathlike, PathType from pydicom.misc import warn_and_log -from pydicom.pixel_data_handlers.util import ( - convert_color_space, +from pydicom.pixels.processing import convert_color_space +from pydicom.pixels.utils import ( reshape_pixel_array, get_image_pixel_ids, get_nr_frames, diff --git a/src/pydicom/encoders/__init__.py b/src/pydicom/encoders/__init__.py index 9c2fe4d882..92eb27a960 100644 --- a/src/pydicom/encoders/__init__.py +++ b/src/pydicom/encoders/__init__.py @@ -3,10 +3,8 @@ from pydicom import config from pydicom.misc import warn_and_log -from pydicom.pixels import ( - get_encoder as _get_encoder, - RLELosslessEncoder as _rle_encoder, -) +from pydicom.pixels import get_encoder as _get_encoder +from pydicom.pixels.encoders import RLELosslessEncoder as _rle_encoder _DEPRECATED = { diff --git a/src/pydicom/overlays/numpy_handler.py b/src/pydicom/overlays/numpy_handler.py index 654cfce9bf..058a8861be 100644 --- a/src/pydicom/overlays/numpy_handler.py +++ b/src/pydicom/overlays/numpy_handler.py @@ -38,7 +38,7 @@ HAVE_NP = False from pydicom.misc import warn_and_log -from pydicom.pixel_data_handlers import unpack_bits +from pydicom.pixels.utils import unpack_bits if TYPE_CHECKING: # pragma: no cover from pydicom.dataset import Dataset diff --git a/src/pydicom/pixel_data_handlers/__init__.py b/src/pydicom/pixel_data_handlers/__init__.py index cf583f8bb7..f1d1be55e6 100644 --- a/src/pydicom/pixel_data_handlers/__init__.py +++ b/src/pydicom/pixel_data_handlers/__init__.py @@ -1,13 +1,54 @@ -from pydicom.pixel_data_handlers.util import ( - apply_color_lut, - apply_modality_lut, - apply_voi_lut, - convert_color_space, - apply_voi, - apply_windowing, - pack_bits, - unpack_bits, - expand_ybr422, +# TODO: remove module in v4.0 +from typing import Any + +from pydicom import config +from pydicom.misc import warn_and_log +from pydicom.pixels.processing import ( + apply_color_lut as _apply_color_lut, + apply_modality_lut as _apply_modality_lut, + apply_voi_lut as _apply_voi_lut, + apply_voi as _apply_voi, + apply_windowing as _apply_windowing, + convert_color_space as _convert_color_space, +) +from pydicom.pixels.utils import ( + expand_ybr422 as _expand_ybr422, + pack_bits as _pack_bits, + unpack_bits as _unpack_bits, ) -apply_rescale = apply_modality_lut + +_DEPRECATED = { + "apply_color_lut": _apply_color_lut, + "apply_modality_lut": _apply_modality_lut, + "apply_rescale": _apply_modality_lut, + "apply_voi_lut": _apply_voi_lut, + "apply_voi": _apply_voi, + "apply_windowing": _apply_windowing, + "convert_color_space": _convert_color_space, + "pack_bits": _pack_bits, + "unpack_bits": _unpack_bits, +} +_DEPRECATED_UTIL = { + "expand_ybr422": _expand_ybr422, +} + + +def __getattr__(name: str) -> Any: + if name in 
_DEPRECATED and not config._use_future: + msg = ( + "The 'pydicom.pixel_data_handlers' module will be removed " + f"in v4.0, please use 'from pydicom.pixels import {name}' instead" + ) + warn_and_log(msg, DeprecationWarning) + return _DEPRECATED[name] + + if name in _DEPRECATED_UTIL and not config._use_future: + msg = ( + "The 'pydicom.pixel_data_handlers' module will be removed " + f"in v4.0, please use 'from pydicom.pixels.utils import {name}' instead" + ) + warn_and_log(msg, DeprecationWarning) + return _DEPRECATED_UTIL[name] + + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/src/pydicom/pixel_data_handlers/gdcm_handler.py b/src/pydicom/pixel_data_handlers/gdcm_handler.py index 9e6178137f..8616da7149 100644 --- a/src/pydicom/pixel_data_handlers/gdcm_handler.py +++ b/src/pydicom/pixel_data_handlers/gdcm_handler.py @@ -33,7 +33,7 @@ from pydicom.encaps import generate_frames, generate_fragmented_frames import pydicom.uid from pydicom.uid import UID, JPEG2000, JPEG2000Lossless -from pydicom.pixel_data_handlers.util import ( +from pydicom.pixels.utils import ( get_expected_length, pixel_dtype, get_j2k_parameters, diff --git a/src/pydicom/pixel_data_handlers/jpeg_ls_handler.py b/src/pydicom/pixel_data_handlers/jpeg_ls_handler.py index 1bfb8fb23a..888fc11a4d 100644 --- a/src/pydicom/pixel_data_handlers/jpeg_ls_handler.py +++ b/src/pydicom/pixel_data_handlers/jpeg_ls_handler.py @@ -20,7 +20,7 @@ HAVE_JPEGLS = False from pydicom.encaps import generate_frames -from pydicom.pixel_data_handlers.util import pixel_dtype, get_nr_frames +from pydicom.pixels.utils import pixel_dtype, get_nr_frames import pydicom.uid if TYPE_CHECKING: # pragma: no cover diff --git a/src/pydicom/pixel_data_handlers/numpy_handler.py b/src/pydicom/pixel_data_handlers/numpy_handler.py index e9628bb805..e55927bbc2 100644 --- a/src/pydicom/pixel_data_handlers/numpy_handler.py +++ b/src/pydicom/pixel_data_handlers/numpy_handler.py @@ -60,7 +60,7 @@ HAVE_NP = False from pydicom.misc import warn_and_log -from pydicom.pixel_data_handlers.util import ( +from pydicom.pixels.utils import ( pixel_dtype, get_expected_length, unpack_bits, diff --git a/src/pydicom/pixel_data_handlers/pillow_handler.py b/src/pydicom/pixel_data_handlers/pillow_handler.py index 82123b7eac..121f2e0b21 100644 --- a/src/pydicom/pixel_data_handlers/pillow_handler.py +++ b/src/pydicom/pixel_data_handlers/pillow_handler.py @@ -31,7 +31,7 @@ from pydicom import config from pydicom.encaps import generate_frames from pydicom.misc import warn_and_log -from pydicom.pixel_data_handlers.util import ( +from pydicom.pixels.utils import ( pixel_dtype, get_j2k_parameters, get_nr_frames, diff --git a/src/pydicom/pixel_data_handlers/pylibjpeg_handler.py b/src/pydicom/pixel_data_handlers/pylibjpeg_handler.py index b09b5d05eb..ae14591334 100644 --- a/src/pydicom/pixel_data_handlers/pylibjpeg_handler.py +++ b/src/pydicom/pixel_data_handlers/pylibjpeg_handler.py @@ -99,7 +99,7 @@ from pydicom import config from pydicom.encaps import generate_frames as frame_generator -from pydicom.pixel_data_handlers.util import ( +from pydicom.pixels.utils import ( pixel_dtype, get_expected_length, reshape_pixel_array, diff --git a/src/pydicom/pixel_data_handlers/rle_handler.py b/src/pydicom/pixel_data_handlers/rle_handler.py index acd12cca3e..56eca7a1ac 100644 --- a/src/pydicom/pixel_data_handlers/rle_handler.py +++ b/src/pydicom/pixel_data_handlers/rle_handler.py @@ -48,7 +48,7 @@ from pydicom.encaps import generate_frames from pydicom.misc import 
warn_and_log -from pydicom.pixel_data_handlers.util import pixel_dtype, get_nr_frames +from pydicom.pixels.utils import pixel_dtype, get_nr_frames import pydicom.uid if TYPE_CHECKING: # pragma: no cover diff --git a/src/pydicom/pixel_data_handlers/util.py b/src/pydicom/pixel_data_handlers/util.py index 227a9fd6df..f9684375ae 100644 --- a/src/pydicom/pixel_data_handlers/util.py +++ b/src/pydicom/pixel_data_handlers/util.py @@ -1,796 +1,50 @@ # Copyright 2008-2018 pydicom authors. See LICENSE file for details. +# TODO: remove module in v4.0 """Utility functions used in the pixel data handlers.""" -from struct import unpack, unpack_from from sys import byteorder -from typing import Optional, TYPE_CHECKING, cast -from collections.abc import Iterable, ByteString +from typing import Any try: import numpy as np - - HAVE_NP = True except ImportError: - HAVE_NP = False + pass -from pydicom.data import get_palette_files +from pydicom import config from pydicom.misc import warn_and_log -from pydicom.uid import UID -from pydicom.valuerep import VR - -if TYPE_CHECKING: # pragma: no cover - from pydicom.dataset import Dataset - - -# Lookup table for unpacking bit-packed data -_UNPACK_LUT: dict[int, bytes] = { - k: bytes(int(s) for s in reversed(f"{k:08b}")) for k in range(256) -} - - -def apply_color_lut( - arr: "np.ndarray", ds: Optional["Dataset"] = None, palette: str | UID | None = None -) -> "np.ndarray": - """Apply a color palette lookup table to `arr`. - - If (0028,1201-1203) *Palette Color Lookup Table Data* are missing - then (0028,1221-1223) *Segmented Palette Color Lookup Table Data* must be - present and vice versa. The presence of (0028,1204) *Alpha Palette Color - Lookup Table Data* or (0028,1224) *Alpha Segmented Palette Color Lookup - Table Data* is optional. - - Use of this function with the :dcm:`Enhanced Palette Color Lookup Table - Module` or :dcm:`Supplemental Palette Color LUT - Module` is not currently supported. - - Parameters - ---------- - arr : numpy.ndarray - The pixel data to apply the color palette to. - ds : dataset.Dataset, optional - Required if `palette` is not supplied. A - :class:`~pydicom.dataset.Dataset` containing a suitable - :dcm:`Image Pixel` or - :dcm:`Palette Color Lookup Table` Module. - palette : str or uid.UID, optional - Required if `ds` is not supplied. The name of one of the - :dcm:`well-known` color palettes defined by the - DICOM Standard. One of: ``'HOT_IRON'``, ``'PET'``, - ``'HOT_METAL_BLUE'``, ``'PET_20_STEP'``, ``'SPRING'``, ``'SUMMER'``, - ``'FALL'``, ``'WINTER'`` or the corresponding well-known (0008,0018) - *SOP Instance UID*. - - Returns - ------- - numpy.ndarray - The RGB or RGBA pixel data as an array of ``np.uint8`` or ``np.uint16`` - values, depending on the 3rd value of (0028,1201) *Red Palette Color - Lookup Table Descriptor*. 
- - References - ---------- - - * :dcm:`Image Pixel Module` - * :dcm:`Supplemental Palette Color LUT Module` - * :dcm:`Enhanced Palette Color LUT Module` - * :dcm:`Palette Colour LUT Module` - * :dcm:`Supplemental Palette Color LUTs - ` - """ - # Note: input value (IV) is the stored pixel value in `arr` - # LUTs[IV] -> [R, G, B] values at the IV pixel location in `arr` - if not ds and not palette: - raise ValueError("Either 'ds' or 'palette' is required") - - if palette: - # Well-known palettes are all 8-bits per entry - datasets = { - "1.2.840.10008.1.5.1": "hotiron.dcm", - "1.2.840.10008.1.5.2": "pet.dcm", - "1.2.840.10008.1.5.3": "hotmetalblue.dcm", - "1.2.840.10008.1.5.4": "pet20step.dcm", - "1.2.840.10008.1.5.5": "spring.dcm", - "1.2.840.10008.1.5.6": "summer.dcm", - "1.2.840.10008.1.5.7": "fall.dcm", - "1.2.840.10008.1.5.8": "winter.dcm", - } - if not UID(palette).is_valid: - try: - uids = { - "HOT_IRON": "1.2.840.10008.1.5.1", - "PET": "1.2.840.10008.1.5.2", - "HOT_METAL_BLUE": "1.2.840.10008.1.5.3", - "PET_20_STEP": "1.2.840.10008.1.5.4", - "SPRING": "1.2.840.10008.1.5.5", - "SUMMER": "1.2.840.10008.1.5.6", - "FALL": "1.2.840.10008.1.5.8", - "WINTER": "1.2.840.10008.1.5.7", - } - palette = uids[palette] - except KeyError: - raise ValueError(f"Unknown palette '{palette}'") - - try: - from pydicom import dcmread - - fname = datasets[palette] - ds = dcmread(get_palette_files(fname)[0]) - except KeyError: - raise ValueError(f"Unknown palette '{palette}'") - - ds = cast("Dataset", ds) - - # C.8.16.2.1.1.1: Supplemental Palette Color LUT - # TODO: Requires greyscale visualisation pipeline - if getattr(ds, "PixelPresentation", None) in ["MIXED", "COLOR"]: - raise ValueError( - "Use of this function with the Supplemental Palette Color Lookup " - "Table Module is not currently supported" - ) - - if "RedPaletteColorLookupTableDescriptor" not in ds: - raise ValueError("No suitable Palette Color Lookup Table Module found") - - # All channels are supposed to be identical - lut_desc = cast(list[int], ds.RedPaletteColorLookupTableDescriptor) - # A value of 0 = 2^16 entries - nr_entries = lut_desc[0] or 2**16 - - # May be negative if Pixel Representation is 1 - first_map = lut_desc[1] - # Actual bit depth may be larger (8 bit entries in 16 bits allocated) - nominal_depth = lut_desc[2] - dtype = np.dtype(f"uint{nominal_depth:.0f}") - - luts = [] - if "RedPaletteColorLookupTableData" in ds: - # LUT Data is described by PS3.3, C.7.6.3.1.6 - r_lut = cast(bytes, ds.RedPaletteColorLookupTableData) - g_lut = cast(bytes, ds.GreenPaletteColorLookupTableData) - b_lut = cast(bytes, ds.BluePaletteColorLookupTableData) - a_lut = cast( - bytes | None, getattr(ds, "AlphaPaletteColorLookupTableData", None) - ) - - actual_depth = len(r_lut) / nr_entries * 8 - dtype = np.dtype(f"uint{actual_depth:.0f}") - - for lut_bytes in [ii for ii in [r_lut, g_lut, b_lut, a_lut] if ii]: - luts.append(np.frombuffer(lut_bytes, dtype=dtype)) - elif "SegmentedRedPaletteColorLookupTableData" in ds: - # Segmented LUT Data is described by PS3.3, C.7.9.2 - r_lut = cast(bytes, ds.SegmentedRedPaletteColorLookupTableData) - g_lut = cast(bytes, ds.SegmentedGreenPaletteColorLookupTableData) - b_lut = cast(bytes, ds.SegmentedBluePaletteColorLookupTableData) - a_lut = cast( - bytes | None, getattr(ds, "SegmentedAlphaPaletteColorLookupTableData", None) - ) - - if hasattr(ds, "file_meta"): - is_little_endian = ds.file_meta._tsyntax_encoding[1] - else: - is_little_endian = ds.original_encoding[1] - - if is_little_endian is None: - raise 
AttributeError( - "Unable to determine the endianness of the dataset, please set " - "an appropriate Transfer Syntax UID in " - f"'{type(ds).__name__}.file_meta'" - ) - - endianness = "><"[is_little_endian] - byte_depth = nominal_depth // 8 - fmt = "B" if byte_depth == 1 else "H" - actual_depth = nominal_depth - - for seg in [ii for ii in [r_lut, g_lut, b_lut, a_lut] if ii]: - len_seg = len(seg) // byte_depth - s_fmt = f"{endianness}{len_seg}{fmt}" - lut_ints = _expand_segmented_lut(unpack(s_fmt, seg), s_fmt) - luts.append(np.asarray(lut_ints, dtype=dtype)) - else: - raise ValueError("No suitable Palette Color Lookup Table Module found") - - if actual_depth not in [8, 16]: - raise ValueError( - f"The bit depth of the LUT data '{actual_depth:.1f}' " - "is invalid (only 8 or 16 bits per entry allowed)" - ) - - lut_lengths = [len(ii) for ii in luts] - if not all(ii == lut_lengths[0] for ii in lut_lengths[1:]): - raise ValueError("LUT data must be the same length") - - # IVs < `first_map` get set to first LUT entry (i.e. index 0) - clipped_iv = np.zeros(arr.shape, dtype=dtype) - # IVs >= `first_map` are mapped by the Palette Color LUTs - # `first_map` may be negative, positive or 0 - mapped_pixels = arr >= first_map - clipped_iv[mapped_pixels] = arr[mapped_pixels] - first_map - # IVs > number of entries get set to last entry - np.clip(clipped_iv, 0, nr_entries - 1, out=clipped_iv) - - # Output array may be RGB or RGBA - out = np.empty(list(arr.shape) + [len(luts)], dtype=dtype) - for ii, lut in enumerate(luts): - out[..., ii] = lut[clipped_iv] - - return out - - -def apply_modality_lut(arr: "np.ndarray", ds: "Dataset") -> "np.ndarray": - """Apply a modality lookup table or rescale operation to `arr`. - - Parameters - ---------- - arr : numpy.ndarray - The :class:`~numpy.ndarray` to apply the modality LUT or rescale - operation to. - ds : dataset.Dataset - A dataset containing a :dcm:`Modality LUT Module - `. - - Returns - ------- - numpy.ndarray - An array with applied modality LUT or rescale operation. If - (0028,3000) *Modality LUT Sequence* is present then returns an array - of ``np.uint8`` or ``np.uint16``, depending on the 3rd value of - (0028,3002) *LUT Descriptor*. If (0028,1052) *Rescale Intercept* and - (0028,1053) *Rescale Slope* are present then returns an array of - ``np.float64``. If neither are present then `arr` will be returned - unchanged. - - Notes - ----- - When *Rescale Slope* and *Rescale Intercept* are used, the output range - is from (min. pixel value * Rescale Slope + Rescale Intercept) to - (max. pixel value * Rescale Slope + Rescale Intercept), where min. and - max. pixel value are determined from (0028,0101) *Bits Stored* and - (0028,0103) *Pixel Representation*. 
- - References - ---------- - * DICOM Standard, Part 3, :dcm:`Annex C.11.1 - ` - * DICOM Standard, Part 4, :dcm:`Annex N.2.1.1 - ` - """ - if ds.get("ModalityLUTSequence"): - item = cast(list["Dataset"], ds.ModalityLUTSequence)[0] - nr_entries = cast(list[int], item.LUTDescriptor)[0] or 2**16 - first_map = cast(list[int], item.LUTDescriptor)[1] - nominal_depth = cast(list[int], item.LUTDescriptor)[2] - - dtype = f"uint{nominal_depth}" - - # Ambiguous VR, US or OW - unc_data: Iterable[int] - if item["LUTData"].VR == VR.OW: - if hasattr(ds, "file_meta"): - is_little_endian = ds.file_meta._tsyntax_encoding[1] - else: - is_little_endian = ds.original_encoding[1] - - if is_little_endian is None: - raise AttributeError( - "Unable to determine the endianness of the dataset, please set " - "an appropriate Transfer Syntax UID in " - f"'{type(ds).__name__}.file_meta'" - ) - - endianness = "><"[is_little_endian] - unpack_fmt = f"{endianness}{nr_entries}H" - unc_data = unpack(unpack_fmt, cast(bytes, item.LUTData)) - else: - unc_data = cast(list[int], item.LUTData) - - lut_data: np.ndarray = np.asarray(unc_data, dtype=dtype) - - # IVs < `first_map` get set to first LUT entry (i.e. index 0) - clipped_iv = np.zeros(arr.shape, dtype=arr.dtype) - # IVs >= `first_map` are mapped by the Modality LUT - # `first_map` may be negative, positive or 0 - mapped_pixels = arr >= first_map - clipped_iv[mapped_pixels] = arr[mapped_pixels] - first_map - # IVs > number of entries get set to last entry - np.clip(clipped_iv, 0, nr_entries - 1, out=clipped_iv) - - return lut_data[clipped_iv] - elif "RescaleSlope" in ds and "RescaleIntercept" in ds: - arr = arr.astype(np.float64) * cast(float, ds.RescaleSlope) - arr += cast(float, ds.RescaleIntercept) - - return arr - - -def apply_voi_lut( - arr: "np.ndarray", ds: "Dataset", index: int = 0, prefer_lut: bool = True -) -> "np.ndarray": - """Apply a VOI lookup table or windowing operation to `arr`. - - .. versionchanged:: 2.1 - - Added the `prefer_lut` keyword parameter - - Parameters - ---------- - arr : numpy.ndarray - The :class:`~numpy.ndarray` to apply the VOI LUT or windowing operation - to. - ds : dataset.Dataset - A dataset containing a :dcm:`VOI LUT Module`. - If (0028,3010) *VOI LUT Sequence* is present then returns an array - of ``np.uint8`` or ``np.uint16``, depending on the 3rd value of - (0028,3002) *LUT Descriptor*. If (0028,1050) *Window Center* and - (0028,1051) *Window Width* are present then returns an array of - ``np.float64``. If neither are present then `arr` will be returned - unchanged. - index : int, optional - When the VOI LUT Module contains multiple alternative views, this is - the index of the view to return (default ``0``). - prefer_lut : bool - When the VOI LUT Module contains both *Window Width*/*Window Center* - and *VOI LUT Sequence*, if ``True`` (default) then apply the VOI LUT, - otherwise apply the windowing operation. - - Returns - ------- - numpy.ndarray - An array with applied VOI LUT or windowing operation. - - Notes - ----- - When the dataset requires a modality LUT or rescale operation as part of - the Modality LUT module then that must be applied before any windowing - operation. 
- - See Also - -------- - :func:`~pydicom.pixel_data_handlers.util.apply_modality_lut` - :func:`~pydicom.pixel_data_handlers.util.apply_voi` - :func:`~pydicom.pixel_data_handlers.util.apply_windowing` - - References - ---------- - * DICOM Standard, Part 3, :dcm:`Annex C.11.2 - ` - * DICOM Standard, Part 3, :dcm:`Annex C.8.11.3.1.5 - ` - * DICOM Standard, Part 4, :dcm:`Annex N.2.1.1 - ` - """ - valid_voi = False - if ds.get("VOILUTSequence"): - ds.VOILUTSequence = cast(list["Dataset"], ds.VOILUTSequence) - valid_voi = None not in [ - ds.VOILUTSequence[0].get("LUTDescriptor", None), - ds.VOILUTSequence[0].get("LUTData", None), - ] - valid_windowing = None not in [ - ds.get("WindowCenter", None), - ds.get("WindowWidth", None), - ] - - if valid_voi and valid_windowing: - if prefer_lut: - return apply_voi(arr, ds, index) - - return apply_windowing(arr, ds, index) - - if valid_voi: - return apply_voi(arr, ds, index) - - if valid_windowing: - return apply_windowing(arr, ds, index) - - return arr - - -def apply_voi(arr: "np.ndarray", ds: "Dataset", index: int = 0) -> "np.ndarray": - """Apply a VOI lookup table to `arr`. - - .. versionadded:: 2.1 - - Parameters - ---------- - arr : numpy.ndarray - The :class:`~numpy.ndarray` to apply the VOI LUT to. - ds : dataset.Dataset - A dataset containing a :dcm:`VOI LUT Module`. - If (0028,3010) *VOI LUT Sequence* is present then returns an array - of ``np.uint8`` or ``np.uint16``, depending on the 3rd value of - (0028,3002) *LUT Descriptor*, otherwise `arr` will be returned - unchanged. - index : int, optional - When the VOI LUT Module contains multiple alternative views, this is - the index of the view to return (default ``0``). - - Returns - ------- - numpy.ndarray - An array with applied VOI LUT. - - See Also - -------- - :func:`~pydicom.pixel_data_handlers.util.apply_modality_lut` - :func:`~pydicom.pixel_data_handlers.util.apply_windowing` - - References - ---------- - * DICOM Standard, Part 3, :dcm:`Annex C.11.2 - ` - * DICOM Standard, Part 3, :dcm:`Annex C.8.11.3.1.5 - ` - * DICOM Standard, Part 4, :dcm:`Annex N.2.1.1 - ` - """ - if not ds.get("VOILUTSequence"): - return arr - - if not np.issubdtype(arr.dtype, np.integer): - warn_and_log( - "Applying a VOI LUT on a float input array may give incorrect results" - ) - - # VOI LUT Sequence contains one or more items - item = cast(list["Dataset"], ds.VOILUTSequence)[index] - lut_descriptor = cast(list[int], item.LUTDescriptor) - nr_entries = lut_descriptor[0] or 2**16 - first_map = lut_descriptor[1] - - # PS3.3 C.8.11.3.1.5: may be 8, 10-16 - nominal_depth = lut_descriptor[2] - if nominal_depth in list(range(10, 17)): - dtype = "uint16" - elif nominal_depth == 8: - dtype = "uint8" - else: - raise NotImplementedError( - f"'{nominal_depth}' bits per LUT entry is not supported" - ) - - # Ambiguous VR, US or OW - unc_data: Iterable[int] - if item["LUTData"].VR == VR.OW: - if hasattr(ds, "file_meta"): - is_little_endian = ds.file_meta._tsyntax_encoding[1] - else: - is_little_endian = ds.original_encoding[1] - - if is_little_endian is None: - raise AttributeError( - "Unable to determine the endianness of the dataset, please set " - "an appropriate Transfer Syntax UID in " - f"'{type(ds).__name__}.file_meta'" - ) - - unpack_fmt = f"{'><'[is_little_endian]}{nr_entries}H" - unc_data = unpack_from(unpack_fmt, cast(bytes, item.LUTData)) - else: - unc_data = cast(list[int], item.LUTData) - - lut_data: np.ndarray = np.asarray(unc_data, dtype=dtype) - - # IVs < `first_map` get set to first LUT entry (i.e. 
index 0) - clipped_iv = np.zeros(arr.shape, dtype=dtype) - # IVs >= `first_map` are mapped by the VOI LUT - # `first_map` may be negative, positive or 0 - mapped_pixels = arr >= first_map - clipped_iv[mapped_pixels] = arr[mapped_pixels] - first_map - # IVs > number of entries get set to last entry - np.clip(clipped_iv, 0, nr_entries - 1, out=clipped_iv) - - return cast("np.ndarray", lut_data[clipped_iv]) - - -def apply_windowing(arr: "np.ndarray", ds: "Dataset", index: int = 0) -> "np.ndarray": - """Apply a windowing operation to `arr`. - - .. versionadded:: 2.1 - - Parameters - ---------- - arr : numpy.ndarray - The :class:`~numpy.ndarray` to apply the windowing operation to. - ds : dataset.Dataset - A dataset containing a :dcm:`VOI LUT Module`. - If (0028,1050) *Window Center* and (0028,1051) *Window Width* are - present then returns an array of ``np.float64``, otherwise `arr` will - be returned unchanged. - index : int, optional - When the VOI LUT Module contains multiple alternative views, this is - the index of the view to return (default ``0``). - - Returns - ------- - numpy.ndarray - An array with applied windowing operation. - - Notes - ----- - When the dataset requires a modality LUT or rescale operation as part of - the Modality LUT module then that must be applied before any windowing - operation. - - See Also - -------- - :func:`~pydicom.pixel_data_handlers.util.apply_modality_lut` - :func:`~pydicom.pixel_data_handlers.util.apply_voi` - - References - ---------- - * DICOM Standard, Part 3, :dcm:`Annex C.11.2 - ` - * DICOM Standard, Part 3, :dcm:`Annex C.8.11.3.1.5 - ` - * DICOM Standard, Part 4, :dcm:`Annex N.2.1.1 - ` - """ - if "WindowWidth" not in ds and "WindowCenter" not in ds: - return arr - - if ds.PhotometricInterpretation not in ["MONOCHROME1", "MONOCHROME2"]: - raise ValueError( - "When performing a windowing operation only 'MONOCHROME1' and " - "'MONOCHROME2' are allowed for (0028,0004) Photometric " - "Interpretation" - ) - - # May be LINEAR (default), LINEAR_EXACT, SIGMOID or not present, VM 1 - voi_func = cast(str, getattr(ds, "VOILUTFunction", "LINEAR")).upper() - # VR DS, VM 1-n - elem = ds["WindowCenter"] - center = cast(list[float], elem.value)[index] if elem.VM > 1 else elem.value - center = cast(float, center) - elem = ds["WindowWidth"] - width = cast(list[float], elem.value)[index] if elem.VM > 1 else elem.value - width = cast(float, width) - - # The output range depends on whether or not a modality LUT or rescale - # operation has been applied - ds.BitsStored = cast(int, ds.BitsStored) - y_min: float - y_max: float - if ds.get("ModalityLUTSequence"): - # Unsigned - see PS3.3 C.11.1.1.1 - y_min = 0 - item = cast(list["Dataset"], ds.ModalityLUTSequence)[0] - bit_depth = cast(list[int], item.LUTDescriptor)[2] - y_max = 2**bit_depth - 1 - elif ds.PixelRepresentation == 0: - # Unsigned - y_min = 0 - y_max = 2**ds.BitsStored - 1 - else: - # Signed - y_min = -(2 ** (ds.BitsStored - 1)) - y_max = 2 ** (ds.BitsStored - 1) - 1 - - slope = ds.get("RescaleSlope", None) - intercept = ds.get("RescaleIntercept", None) - if slope is not None and intercept is not None: - ds.RescaleSlope = cast(float, ds.RescaleSlope) - ds.RescaleIntercept = cast(float, ds.RescaleIntercept) - # Otherwise its the actual data range - y_min = y_min * ds.RescaleSlope + ds.RescaleIntercept - y_max = y_max * ds.RescaleSlope + ds.RescaleIntercept - - y_range = y_max - y_min - arr = arr.astype("float64") - - if voi_func in ["LINEAR", "LINEAR_EXACT"]: - # PS3.3 C.11.2.1.2.1 and C.11.2.1.3.2 - if 
voi_func == "LINEAR": - if width < 1: - raise ValueError( - "The (0028,1051) Window Width must be greater than or " - "equal to 1 for a 'LINEAR' windowing operation" - ) - center -= 0.5 - width -= 1 - elif width <= 0: - raise ValueError( - "The (0028,1051) Window Width must be greater than 0 " - "for a 'LINEAR_EXACT' windowing operation" - ) - - below = arr <= (center - width / 2) - above = arr > (center + width / 2) - between = np.logical_and(~below, ~above) - - arr[below] = y_min - arr[above] = y_max - if between.any(): - arr[between] = ((arr[between] - center) / width + 0.5) * y_range + y_min - elif voi_func == "SIGMOID": - # PS3.3 C.11.2.1.3.1 - if width <= 0: - raise ValueError( - "The (0028,1051) Window Width must be greater than 0 " - "for a 'SIGMOID' windowing operation" - ) - - arr = y_range / (1 + np.exp(-4 * (arr - center) / width)) + y_min - else: - raise ValueError(f"Unsupported (0028,1056) VOI LUT Function value '{voi_func}'") - - return arr - - -def convert_color_space( - arr: "np.ndarray", current: str, desired: str, per_frame: bool = False -) -> "np.ndarray": - """Convert the image(s) in `arr` from one color space to another. - - .. versionchanged:: 2.2 - - Added `per_frame` keyword parameter. - - Parameters - ---------- - arr : numpy.ndarray - The image(s) as a :class:`numpy.ndarray` with - :attr:`~numpy.ndarray.shape` (frames, rows, columns, 3) - or (rows, columns, 3). - current : str - The current color space, should be a valid value for (0028,0004) - *Photometric Interpretation*. One of ``'RGB'``, ``'YBR_FULL'``, - ``'YBR_FULL_422'``. - desired : str - The desired color space, should be a valid value for (0028,0004) - *Photometric Interpretation*. One of ``'RGB'``, ``'YBR_FULL'``, - ``'YBR_FULL_422'``. - per_frame : bool, optional - If ``True`` and the input array contains multiple frames then process - each frame individually to reduce memory usage. Default ``False``. - - Returns - ------- - numpy.ndarray - The image(s) converted to the desired color space. - - References - ---------- - - * DICOM Standard, Part 3, - :dcm:`Annex C.7.6.3.1.2` - * ISO/IEC 10918-5:2012 (`ITU T.871 - `_), - Section 7 - """ - - def _no_change(arr: "np.ndarray") -> "np.ndarray": - return arr - - _converters = { - "YBR_FULL_422": { - "YBR_FULL_422": _no_change, - "YBR_FULL": _no_change, - "RGB": _convert_YBR_FULL_to_RGB, - }, - "YBR_FULL": { - "YBR_FULL": _no_change, - "YBR_FULL_422": _no_change, - "RGB": _convert_YBR_FULL_to_RGB, - }, - "RGB": { - "RGB": _no_change, - "YBR_FULL": _convert_RGB_to_YBR_FULL, - "YBR_FULL_422": _convert_RGB_to_YBR_FULL, - }, - } - try: - converter = _converters[current][desired] - except KeyError: - raise NotImplementedError( - f"Conversion from {current} to {desired} is not supported." - ) - - if len(arr.shape) == 4 and per_frame: - for idx, frame in enumerate(arr): - arr[idx] = converter(frame) - - return arr - - return converter(arr) - - -def _convert_RGB_to_YBR_FULL(arr: "np.ndarray") -> "np.ndarray": - """Return an ndarray converted from RGB to YBR_FULL color space. - - Parameters - ---------- - arr : numpy.ndarray - An ndarray of an 8-bit per channel images in RGB color space. - - Returns - ------- - numpy.ndarray - The array in YBR_FULL color space. 
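Written out per pixel, the ``rgb_to_ybr`` matrix used below corresponds to the full-range ITU-T T.871 equations (the ``+ 0.5`` offset and ``np.floor`` steps implement rounding):

.. math::

    Y   &= 0.299 R + 0.587 G + 0.114 B \\
    C_B &= (B - Y) / 1.772 + 128 \\
    C_R &= (R - Y) / 1.402 + 128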
- - References - ---------- - - * DICOM Standard, Part 3, - :dcm:`Annex C.7.6.3.1.2` - * ISO/IEC 10918-5:2012 (`ITU T.871 - `_), - Section 7 - """ - orig_dtype = arr.dtype - - rgb_to_ybr = np.asarray( - [ - [+0.299, -0.299 / 1.772, +0.701 / 1.402], - [+0.587, -0.587 / 1.772, -0.587 / 1.402], - [+0.114, +0.886 / 1.772, -0.114 / 1.402], - ], - dtype=np.float32, - ) - - arr = np.matmul(arr, rgb_to_ybr, dtype=np.float32) - arr += [0.5, 128.5, 128.5] - # Round(x) -> floor of (arr + 0.5) : 0.5 added in previous step - np.floor(arr, out=arr) - # Max(0, arr) -> 0 if 0 >= arr, arr otherwise - # Min(arr, 255) -> arr if arr <= 255, 255 otherwise - np.clip(arr, 0, 255, out=arr) - - return arr.astype(orig_dtype) - - -def _convert_YBR_FULL_to_RGB(arr: "np.ndarray") -> "np.ndarray": - """Return an ndarray converted from YBR_FULL to RGB color space. - - Parameters - ---------- - arr : numpy.ndarray - An ndarray of an 8-bit per channel images in YBR_FULL color space. - - Returns - ------- - numpy.ndarray - The array in RGB color space. - - References - ---------- - - * DICOM Standard, Part 3, - :dcm:`Annex C.7.6.3.1.2` - * ISO/IEC 10918-5:2012, Section 7 - """ - orig_dtype = arr.dtype - - ybr_to_rgb = np.asarray( - [ - [1.000, 1.000, 1.000], - [0.000, -0.114 * 1.772 / 0.587, 1.772], - [1.402, -0.299 * 1.402 / 0.587, 0.000], - ], - dtype=np.float32, - ) - - arr = arr.astype(np.float32) - arr -= [0, 128, 128] - # Round(x) -> floor of (arr + 0.5) - np.matmul(arr, ybr_to_rgb, out=arr) - arr += 0.5 - np.floor(arr, out=arr) - # Max(0, arr) -> 0 if 0 >= arr, arr otherwise - # Min(arr, 255) -> arr if arr <= 255, 255 otherwise - np.clip(arr, 0, 255, out=arr) - return arr.astype(orig_dtype) - - -def dtype_corrected_for_endianness( +from pydicom.pixels.processing import ( + apply_color_lut as _apply_color_lut, + apply_modality_lut as _apply_modality_lut, + apply_voi_lut as _apply_voi_lut, + apply_voi as _apply_voi, + apply_windowing as _apply_windowing, + convert_color_space as _convert_color_space, +) +from pydicom.pixels.utils import ( + expand_ybr422 as _expand_ybr422, + get_expected_length as _get_expected_length, + get_image_pixel_ids as _get_image_pixel_ids, + get_j2k_parameters as _get_j2k_parameters, + get_nr_frames as _get_nr_frames, + pack_bits as _pack_bits, + pixel_dtype as _pixel_dtype, + reshape_pixel_array as _reshape_pixel_array, + unpack_bits as _unpack_bits, +) + + +def _dtype_corrected_for_endianness( is_little_endian: bool, numpy_dtype: "np.dtype" ) -> "np.dtype": """Return a :class:`numpy.dtype` corrected for system and :class:`Dataset` endianness. + .. deprecated:: 3.0 + + This function will be removed in v4.0. + Parameters ---------- is_little_endian : bool @@ -822,721 +76,50 @@ def dtype_corrected_for_endianness( return numpy_dtype -def expand_ybr422(src: ByteString, bits_allocated: int) -> bytes: - """Return ``YBR_FULL_422`` data expanded to ``YBR_FULL``. - - Uncompressed datasets with a (0028,0004) *Photometric Interpretation* of - ``"YBR_FULL_422"`` are subsampled in the horizontal direction by halving - the number of Cb and Cr pixels (i.e. there are two Y pixels for every Cb - and Cr pixel). This function expands the ``YBR_FULL_422`` data to remove - the subsampling and the output is therefore ``YBR_FULL``. - - Parameters - ---------- - src : bytes or bytearray - The YBR_FULL_422 pixel data to be expanded. - bits_allocated : int - The number of bits used to store each pixel, as given by (0028,0100) - *Bits Allocated*. - - Returns - ------- - bytes - The expanded data (as YBR_FULL). 
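As an illustrative sketch with synthetic 8-bit values (imported from the function's new home in ``pydicom.pixels.utils`` per this change), one ``Y0 Y1 Cb Cr`` group expands into two full ``Y Cb Cr`` pixels:

.. code-block:: python

    >>> from pydicom.pixels.utils import expand_ybr422
    >>> expand_ybr422(b"\x01\x02\x80\x90", 8)  # Y0=1, Y1=2, Cb=0x80, Cr=0x90
    b'\x01\x80\x90\x02\x80\x90'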
- """ - # YBR_FULL_422 is Y Y Cb Cr (i.e. 2 Y pixels for every Cb and Cr pixel) - n_bytes = bits_allocated // 8 - length = len(src) // 2 * 3 - dst = bytearray(length) - - step_src = n_bytes * 4 - step_dst = n_bytes * 6 - for ii in range(n_bytes): - c_b = src[2 * n_bytes + ii :: step_src] - c_r = src[3 * n_bytes + ii :: step_src] - - dst[0 * n_bytes + ii :: step_dst] = src[0 * n_bytes + ii :: step_src] - dst[1 * n_bytes + ii :: step_dst] = c_b - dst[2 * n_bytes + ii :: step_dst] = c_r - - dst[3 * n_bytes + ii :: step_dst] = src[1 * n_bytes + ii :: step_src] - dst[4 * n_bytes + ii :: step_dst] = c_b - dst[5 * n_bytes + ii :: step_dst] = c_r - - return bytes(dst) - - -def _expand_segmented_lut( - data: tuple[int, ...], - fmt: str, - nr_segments: int | None = None, - last_value: int | None = None, -) -> list[int]: - """Return a list containing the expanded lookup table data. - - Parameters - ---------- - data : tuple of int - The decoded segmented palette lookup table data. May be padded by a - trailing null. - fmt : str - The format of the data, should contain `'B'` for 8-bit, `'H'` for - 16-bit, `'<'` for little endian and `'>'` for big endian. - nr_segments : int, optional - Expand at most `nr_segments` from the data. Should be used when - the opcode is ``2`` (indirect). If used then `last_value` should also - be used. - last_value : int, optional - The previous value in the expanded lookup table. Should be used when - the opcode is ``2`` (indirect). If used then `nr_segments` should also - be used. - - Returns - ------- - list of int - The reconstructed lookup table data. - - References - ---------- - - * DICOM Standard, Part 3, Annex C.7.9 - """ - # Indirect segment byte offset is dependent on endianness for 8-bit - # Little endian: e.g. 0x0302 0x0100, big endian, e.g. 
0x0203 0x0001 - indirect_ii = [3, 2, 1, 0] if "<" in fmt else [2, 3, 0, 1] - - lut: list[int] = [] - offset = 0 - segments_read = 0 - # Use `offset + 1` to account for possible trailing null - # can do this because all segment types are longer than 2 - while offset + 1 < len(data): - opcode = data[offset] - length = data[offset + 1] - offset += 2 - - if opcode == 0: - # C.7.9.2.1: Discrete segment - lut.extend(data[offset : offset + length]) - offset += length - elif opcode == 1: - # C.7.9.2.2: Linear segment - if lut: - y0 = lut[-1] - elif last_value: - # Indirect segment with linear segment at 0th offset - y0 = last_value - else: - raise ValueError( - "Error expanding a segmented palette color lookup table: " - "the first segment cannot be a linear segment" - ) - - y1 = data[offset] - offset += 1 - - if y0 == y1: - lut.extend([y1] * length) - else: - step = (y1 - y0) / length - vals = np.around(np.linspace(y0 + step, y1, length)) - lut.extend([int(vv) for vv in vals]) - elif opcode == 2: - # C.7.9.2.3: Indirect segment - if not lut: - raise ValueError( - "Error expanding a segmented palette color lookup table: " - "the first segment cannot be an indirect segment" - ) - - if "B" in fmt: - # 8-bit segment entries - ii = [data[offset + vv] for vv in indirect_ii] - byte_offset = (ii[0] << 8 | ii[1]) << 16 | (ii[2] << 8 | ii[3]) - offset += 4 - else: - # 16-bit segment entries - byte_offset = data[offset + 1] << 16 | data[offset] - offset += 2 - - lut.extend(_expand_segmented_lut(data[byte_offset:], fmt, length, lut[-1])) - else: - raise ValueError( - "Error expanding a segmented palette lookup table: " - f"unknown segment type '{opcode}'" - ) - - segments_read += 1 - if segments_read == nr_segments: - return lut - - return lut - - -def get_expected_length(ds: "Dataset", unit: str = "bytes") -> int: - """Return the expected length (in terms of bytes or pixels) of the *Pixel - Data*. - - +------------------------------------------------+-------------+ - | Element | Required or | - +-------------+---------------------------+------+ optional | - | Tag | Keyword | Type | | - +=============+===========================+======+=============+ - | (0028,0002) | SamplesPerPixel | 1 | Required | - +-------------+---------------------------+------+-------------+ - | (0028,0004) | PhotometricInterpretation | 1 | Required | - +-------------+---------------------------+------+-------------+ - | (0028,0008) | NumberOfFrames | 1C | Optional | - +-------------+---------------------------+------+-------------+ - | (0028,0010) | Rows | 1 | Required | - +-------------+---------------------------+------+-------------+ - | (0028,0011) | Columns | 1 | Required | - +-------------+---------------------------+------+-------------+ - | (0028,0100) | BitsAllocated | 1 | Required | - +-------------+---------------------------+------+-------------+ - - Parameters - ---------- - ds : Dataset - The :class:`~pydicom.dataset.Dataset` containing the Image Pixel module - and *Pixel Data*. - unit : str, optional - If ``'bytes'`` then returns the expected length of the *Pixel Data* in - whole bytes and NOT including an odd length trailing NULL padding - byte. If ``'pixels'`` then returns the expected length of the *Pixel - Data* in terms of the total number of pixels (default ``'bytes'``). - - Returns - ------- - int - The expected length of the *Pixel Data* in either whole bytes or - pixels, excluding the NULL trailing padding byte for odd length data. 
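For example, with a synthetic single-frame dataset of 10 x 10 single-sample 1-bit pixels (matching the bit-packing note in the implementation below), and importing from the function's new location:

.. code-block:: python

    >>> from pydicom.dataset import Dataset
    >>> from pydicom.pixels.utils import get_expected_length
    >>> ds = Dataset()
    >>> ds.Rows, ds.Columns = 10, 10
    >>> ds.SamplesPerPixel = 1
    >>> ds.BitsAllocated = 1
    >>> ds.PhotometricInterpretation = "MONOCHROME2"
    >>> get_expected_length(ds, unit="pixels")
    100
    >>> get_expected_length(ds)  # 100 bits packed into 13 whole bytes
    13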
- """ - rows = cast(int, ds.Rows) - columns = cast(int, ds.Columns) - samples_per_pixel = cast(int, ds.SamplesPerPixel) - bits_allocated = cast(int, ds.BitsAllocated) - - length = rows * columns * samples_per_pixel - length *= get_nr_frames(ds) - - if unit == "pixels": - return length - - # Correct for the number of bytes per pixel - if bits_allocated == 1: - # Determine the nearest whole number of bytes needed to contain - # 1-bit pixel data. e.g. 10 x 10 1-bit pixels is 100 bits, which - # are packed into 12.5 -> 13 bytes - length = length // 8 + (length % 8 > 0) - else: - length *= bits_allocated // 8 - - # DICOM Standard, Part 4, Annex C.7.6.3.1.2 - if ds.PhotometricInterpretation == "YBR_FULL_422": - length = length // 3 * 2 - - return length - - -def get_image_pixel_ids(ds: "Dataset") -> dict[str, int]: - """Return a dict of the pixel data affecting element's :func:`id` values. - - +------------------------------------------------+ - | Element | - +-------------+---------------------------+------+ - | Tag | Keyword | Type | - +=============+===========================+======+ - | (0028,0002) | SamplesPerPixel | 1 | - +-------------+---------------------------+------+ - | (0028,0004) | PhotometricInterpretation | 1 | - +-------------+---------------------------+------+ - | (0028,0006) | PlanarConfiguration | 1C | - +-------------+---------------------------+------+ - | (0028,0008) | NumberOfFrames | 1C | - +-------------+---------------------------+------+ - | (0028,0010) | Rows | 1 | - +-------------+---------------------------+------+ - | (0028,0011) | Columns | 1 | - +-------------+---------------------------+------+ - | (0028,0100) | BitsAllocated | 1 | - +-------------+---------------------------+------+ - | (0028,0101) | BitsStored | 1 | - +-------------+---------------------------+------+ - | (0028,0103) | PixelRepresentation | 1 | - +-------------+---------------------------+------+ - | (7FE0,0008) | FloatPixelData | 1C | - +-------------+---------------------------+------+ - | (7FE0,0009) | DoubleFloatPixelData | 1C | - +-------------+---------------------------+------+ - | (7FE0,0010) | PixelData | 1C | - +-------------+---------------------------+------+ - - Parameters - ---------- - ds : Dataset - The :class:`~pydicom.dataset.Dataset` containing the pixel data. - - Returns - ------- - dict - A dict containing the :func:`id` values for the elements that affect - the pixel data. - - """ - keywords = [ - "SamplesPerPixel", - "PhotometricInterpretation", - "PlanarConfiguration", - "NumberOfFrames", - "Rows", - "Columns", - "BitsAllocated", - "BitsStored", - "PixelRepresentation", - "FloatPixelData", - "DoubleFloatPixelData", - "PixelData", - ] - - return {kw: id(getattr(ds, kw, None)) for kw in keywords} - - -def get_j2k_parameters(codestream: bytes) -> dict[str, object]: - """Return a dict containing JPEG 2000 component parameters. - - .. versionadded:: 2.1 - - Parameters - ---------- - codestream : bytes - The JPEG 2000 (ISO/IEC 15444-1) codestream to be parsed. - - Returns - ------- - dict - A dict containing parameters for the first component sample in the - JPEG 2000 `codestream`, or an empty dict if unable to parse the data. - Available parameters are ``{"precision": int, "is_signed": bool}``. 
- """ - try: - # First 2 bytes must be the SOC marker - if not then wrong format - if codestream[0:2] != b"\xff\x4f": - return {} - - # SIZ is required to be the second marker - Figure A-3 in 15444-1 - if codestream[2:4] != b"\xff\x51": - return {} - - # See 15444-1 A.5.1 for format of the SIZ box and contents - ssiz = codestream[42] - if ssiz & 0x80: - return {"precision": (ssiz & 0x7F) + 1, "is_signed": True} - - return {"precision": ssiz + 1, "is_signed": False} - except (IndexError, TypeError): - pass - - return {} - - -def get_nr_frames(ds: "Dataset", warn: bool = True) -> int: - """Return NumberOfFrames or 1 if NumberOfFrames is None or 0. - - Parameters - ---------- - ds : dataset.Dataset - The :class:`~pydicom.dataset.Dataset` containing the Image Pixel module - corresponding to the data in `arr`. - warn : bool - If ``True`` (the default), a warning is issued if NumberOfFrames - has an invalid value. - - Returns - ------- - int - An integer for the NumberOfFrames or 1 if NumberOfFrames is None or 0 - """ - nr_frames: int | None = getattr(ds, "NumberOfFrames", 1) - # 'NumberOfFrames' may exist in the DICOM file but have value equal to None - if not nr_frames: # None or 0 - if warn: - warn_and_log( - f"A value of {nr_frames} for (0028,0008) 'Number of Frames' is " - "non-conformant. It's recommended that this value be " - "changed to 1" - ) - nr_frames = 1 - - return nr_frames - - -def pack_bits(arr: "np.ndarray", pad: bool = True) -> bytes: - """Pack a binary :class:`numpy.ndarray` for use with *Pixel Data*. - - Should be used in conjunction with (0028,0100) *Bits Allocated* = 1. - - .. versionchanged:: 2.1 - - Added the `pad` keyword parameter and changed to allow `arr` to be - 2 or 3D. - - Parameters - ---------- - arr : numpy.ndarray - The :class:`numpy.ndarray` containing 1-bit data as ints. `arr` must - only contain integer values of 0 and 1 and must have an 'uint' or - 'int' :class:`numpy.dtype`. For the sake of efficiency it's recommended - that the length of `arr` be a multiple of 8 (i.e. that any empty - bit-padding to round out the byte has already been added). The input - `arr` should either be shaped as (rows, columns) or (frames, rows, - columns) or the equivalent 1D array used to ensure that the packed - data is in the correct order. - pad : bool, optional - If ``True`` (default) then add a null byte to the end of the packed - data to ensure even length, otherwise no padding will be added. - - Returns - ------- - bytes - The bit packed data. - - Raises - ------ - ValueError - If `arr` contains anything other than 0 or 1. - - References - ---------- - DICOM Standard, Part 5, - :dcm:`Section 8.1.1` and - :dcm:`Annex D` - """ - if arr.shape == (0,): - return b"" - - # Test array - if not np.array_equal(arr, arr.astype(bool)): - raise ValueError( - "Only binary arrays (containing ones or zeroes) can be packed." - ) - - if len(arr.shape) > 1: - arr = arr.ravel() - - # The array length must be a multiple of 8, pad the end - if arr.shape[0] % 8: - arr = np.append(arr, np.zeros(8 - arr.shape[0] % 8)) - - arr = np.packbits(arr.astype("u1"), bitorder="little") - - packed: bytes = arr.tobytes() - if pad: - return packed + b"\x00" if len(packed) % 2 else packed - - return packed - - -def pixel_dtype(ds: "Dataset", as_float: bool = False) -> "np.dtype": - """Return a :class:`numpy.dtype` for the pixel data in `ds`. 
- - Suitable for use with IODs containing the Image Pixel module (with - ``as_float=False``) and the Floating Point Image Pixel and Double Floating - Point Image Pixel modules (with ``as_float=True``). - - +------------------------------------------+------------------+ - | Element | Supported | - +-------------+---------------------+------+ values | - | Tag | Keyword | Type | | - +=============+=====================+======+==================+ - | (0028,0101) | BitsAllocated | 1 | 1, 8, 16, 32, 64 | - +-------------+---------------------+------+------------------+ - | (0028,0103) | PixelRepresentation | 1 | 0, 1 | - +-------------+---------------------+------+------------------+ - - Parameters - ---------- - ds : Dataset - The :class:`~pydicom.dataset.Dataset` containing the pixel data you - wish to get the data type for. - as_float : bool, optional - If ``True`` then return a float dtype, otherwise return an integer - dtype (default ``False``). Float dtypes are only supported when - (0028,0101) *Bits Allocated* is 32 or 64. - - Returns - ------- - numpy.dtype - A :class:`numpy.dtype` suitable for containing the pixel data. - - Raises - ------ - NotImplementedError - If the pixel data is of a type that isn't supported by either numpy - or *pydicom*. - """ - if not HAVE_NP: - raise ImportError("Numpy is required to determine the dtype.") +_DEPRECATED = { + "apply_color_lut": _apply_color_lut, + "apply_modality_lut": _apply_modality_lut, + "apply_voi_lut": _apply_voi_lut, + "apply_voi": _apply_voi, + "apply_windowing": _apply_windowing, + "convert_color_space": _convert_color_space, + "pack_bits": _pack_bits, + "unpack_bits": _unpack_bits, +} +_DEPRECATED_UTIL = { + "expand_ybr422": _expand_ybr422, + "get_expected_length": _get_expected_length, + "get_image_pixel_ids": _get_image_pixel_ids, + "get_j2k_parameters": _get_j2k_parameters, + "get_nr_frames": _get_nr_frames, + "pixel_dtype": _pixel_dtype, + "reshape_pixel_array": _reshape_pixel_array, +} - # Prefer Transfer Syntax UID, fall back to the original encoding - if hasattr(ds, "file_meta"): - is_little_endian = ds.file_meta._tsyntax_encoding[1] - else: - is_little_endian = ds.original_encoding[1] - if is_little_endian is None: - raise AttributeError( - "Unable to determine the endianness of the dataset, please set " - "an appropriate Transfer Syntax UID in " - f"'{type(ds).__name__}.file_meta'" +def __getattr__(name: str) -> Any: + if name in _DEPRECATED and not config._use_future: + msg = ( + "The 'pydicom.pixel_data_handlers' module will be removed " + f"in v4.0, please use 'from pydicom.pixels import {name}' instead" ) + warn_and_log(msg, DeprecationWarning) + return _DEPRECATED[name] - if not as_float: - # (0028,0103) Pixel Representation, US, 1 - # Data representation of the pixel samples - # 0x0000 - unsigned int - # 0x0001 - 2's complement (signed int) - pixel_repr = cast(int, ds.PixelRepresentation) - if pixel_repr == 0: - dtype_str = "uint" - elif pixel_repr == 1: - dtype_str = "int" - else: - raise ValueError( - "Unable to determine the data type to use to contain the " - f"Pixel Data as a value of '{pixel_repr}' for '(0028,0103) " - "Pixel Representation' is invalid" - ) - else: - dtype_str = "float" - - # (0028,0100) Bits Allocated, US, 1 - # The number of bits allocated for each pixel sample - # PS3.5 8.1.1: Bits Allocated shall either be 1 or a multiple of 8 - # For bit packed data we use uint8 - bits_allocated = cast(int, ds.BitsAllocated) - if bits_allocated == 1: - dtype_str = "uint8" - elif bits_allocated > 0 and 
bits_allocated % 8 == 0: - dtype_str += str(bits_allocated) - else: - raise ValueError( - "Unable to determine the data type to use to contain the " - f"Pixel Data as a value of '{bits_allocated}' for '(0028,0100) " - "Bits Allocated' is invalid" + if name in _DEPRECATED_UTIL and not config._use_future: + msg = ( + "The 'pydicom.pixel_data_handlers' module will be removed " + f"in v4.0, please use 'from pydicom.pixels.utils import {name}' instead" ) + warn_and_log(msg, DeprecationWarning) + return _DEPRECATED_UTIL[name] - # Check to see if the dtype is valid for numpy - try: - dtype = np.dtype(dtype_str) - except TypeError: - raise NotImplementedError( - f"The data type '{dtype_str}' needed to contain the Pixel Data " - "is not supported by numpy" + if name == "dtype_corrected_for_endianness" and not config._use_future: + msg = ( + "'dtype_corrected_for_endianness' is deprecated and will be " + "removed in v4.0" ) + warn_and_log(msg, DeprecationWarning) + return _dtype_corrected_for_endianness - # Correct for endianness of the system vs endianness of the dataset - if is_little_endian != (byteorder == "little"): - # 'S' swap from current to opposite - dtype = dtype.newbyteorder("S") - - return dtype - - -def reshape_pixel_array(ds: "Dataset", arr: "np.ndarray") -> "np.ndarray": - """Return a reshaped :class:`numpy.ndarray` `arr`. - - +------------------------------------------+-----------+----------+ - | Element | Supported | | - +-------------+---------------------+------+ values | | - | Tag | Keyword | Type | | | - +=============+=====================+======+===========+==========+ - | (0028,0002) | SamplesPerPixel | 1 | N > 0 | Required | - +-------------+---------------------+------+-----------+----------+ - | (0028,0006) | PlanarConfiguration | 1C | 0, 1 | Optional | - +-------------+---------------------+------+-----------+----------+ - | (0028,0008) | NumberOfFrames | 1C | N > 0 | Optional | - +-------------+---------------------+------+-----------+----------+ - | (0028,0010) | Rows | 1 | N > 0 | Required | - +-------------+---------------------+------+-----------+----------+ - | (0028,0011) | Columns | 1 | N > 0 | Required | - +-------------+---------------------+------+-----------+----------+ - - (0028,0008) *Number of Frames* is required when *Pixel Data* contains - more than 1 frame. (0028,0006) *Planar Configuration* is required when - (0028,0002) *Samples per Pixel* is greater than 1. For certain - compressed transfer syntaxes it is always taken to be either 0 or 1 as - shown in the table below. 
- - +---------------------------------------------+-----------------------+ - | Transfer Syntax | Planar Configuration | - +------------------------+--------------------+ | - | UID | Name | | - +========================+====================+=======================+ - | 1.2.840.10008.1.2.4.50 | JPEG Baseline | 0 | - +------------------------+--------------------+-----------------------+ - | 1.2.840.10008.1.2.4.57 | JPEG Lossless, | 0 | - | | Non-hierarchical | | - +------------------------+--------------------+-----------------------+ - | 1.2.840.10008.1.2.4.70 | JPEG Lossless, | 0 | - | | Non-hierarchical, | | - | | SV1 | | - +------------------------+--------------------+-----------------------+ - | 1.2.840.10008.1.2.4.80 | JPEG-LS Lossless | 0 | - +------------------------+--------------------+-----------------------+ - | 1.2.840.10008.1.2.4.81 | JPEG-LS Lossy | 0 | - +------------------------+--------------------+-----------------------+ - | 1.2.840.10008.1.2.4.90 | JPEG 2000 Lossless | 0 | - +------------------------+--------------------+-----------------------+ - | 1.2.840.10008.1.2.4.91 | JPEG 2000 Lossy | 0 | - +------------------------+--------------------+-----------------------+ - | 1.2.840.10008.1.2.5 | RLE Lossless | 1 | - +------------------------+--------------------+-----------------------+ - - .. versionchanged:: 2.1 - - JPEG-LS transfer syntaxes changed to *Planar Configuration* of 0 - - Parameters - ---------- - ds : dataset.Dataset - The :class:`~pydicom.dataset.Dataset` containing the Image Pixel module - corresponding to the data in `arr`. - arr : numpy.ndarray - The 1D array containing the pixel data. - - Returns - ------- - numpy.ndarray - A reshaped array containing the pixel data. The shape of the array - depends on the contents of the dataset: - - * For single frame, single sample data (rows, columns) - * For single frame, multi-sample data (rows, columns, planes) - * For multi-frame, single sample data (frames, rows, columns) - * For multi-frame, multi-sample data (frames, rows, columns, planes) - - References - ---------- - - * DICOM Standard, Part 3, - :dcm:`Annex C.7.6.3.1` - * DICOM Standard, Part 5, :dcm:`Section 8.2` - """ - if not HAVE_NP: - raise ImportError("Numpy is required to reshape the pixel array.") - - nr_frames = get_nr_frames(ds) - nr_samples = cast(int, ds.SamplesPerPixel) - - if nr_samples < 1: - raise ValueError( - f"Unable to reshape the pixel array as a value of {nr_samples} " - "for (0028,0002) 'Samples per Pixel' is invalid." - ) - - # Valid values for Planar Configuration are dependent on transfer syntax - if nr_samples > 1: - transfer_syntax = ds.file_meta.TransferSyntaxUID - if transfer_syntax in [ - "1.2.840.10008.1.2.4.50", - "1.2.840.10008.1.2.4.57", - "1.2.840.10008.1.2.4.70", - "1.2.840.10008.1.2.4.80", - "1.2.840.10008.1.2.4.81", - "1.2.840.10008.1.2.4.90", - "1.2.840.10008.1.2.4.91", - ]: - planar_configuration = 0 - elif transfer_syntax in ["1.2.840.10008.1.2.5"]: - planar_configuration = 1 - else: - planar_configuration = ds.PlanarConfiguration - - if planar_configuration not in [0, 1]: - raise ValueError( - "Unable to reshape the pixel array as a value of " - f"{planar_configuration} for (0028,0006) 'Planar " - "Configuration' is invalid." 
- ) - - rows = cast(int, ds.Rows) - columns = cast(int, ds.Columns) - if nr_frames > 1: - # Multi-frame - if nr_samples == 1: - # Single plane - arr = arr.reshape(nr_frames, rows, columns) - else: - # Multiple planes, usually 3 - if planar_configuration == 0: - arr = arr.reshape(nr_frames, rows, columns, nr_samples) - else: - arr = arr.reshape(nr_frames, nr_samples, rows, columns) - arr = arr.transpose(0, 2, 3, 1) - else: - # Single frame - if nr_samples == 1: - # Single plane - arr = arr.reshape(rows, columns) - else: - # Multiple planes, usually 3 - if planar_configuration == 0: - arr = arr.reshape(rows, columns, nr_samples) - else: - arr = arr.reshape(nr_samples, rows, columns) - arr = arr.transpose(1, 2, 0) - - return arr - - -def unpack_bits(src: bytes, as_array: bool = True) -> "np.ndarray | bytes": - """Unpack the bit-packed data in `src`. - - Suitable for use when (0028,0011) *Bits Allocated* or (60xx,0100) *Overlay - Bits Allocated* is 1. - - If `NumPy `_ is available then it will be used to - unpack the data, otherwise only the standard library will be used, which - is about 20 times slower. - - .. versionchanged:: 2.3 - - Added the `as_array` keyword parameter, support for unpacking - without NumPy, and added :class:`bytes` as a possible return type - - Parameters - ---------- - src : bytes - The bit-packed data. - as_array : bool, optional - If ``False`` then return the unpacked data as :class:`bytes`, otherwise - return a :class:`numpy.ndarray` (default, requires NumPy). - - Returns - ------- - bytes or numpy.ndarray - The unpacked data as an :class:`numpy.ndarray` (if NumPy is available - and ``as_array == True``) or :class:`bytes` otherwise. - - Raises - ------ - ValueError - If `as_array` is ``True`` and NumPy is not available. - - References - ---------- - DICOM Standard, Part 5, - :dcm:`Section 8.1.1` and - :dcm:`Annex D` - """ - if HAVE_NP: - arr = np.frombuffer(src, dtype="u1") - arr = np.unpackbits(arr, bitorder="little") - - return arr if as_array else arr.tobytes() - - if as_array: - raise ValueError("unpack_bits() requires NumPy if 'as_array = True'") - - return b"".join(map(_UNPACK_LUT.__getitem__, src)) + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/src/pydicom/pixels/__init__.py b/src/pydicom/pixels/__init__.py index 16e5db014c..d4b1533964 100644 --- a/src/pydicom/pixels/__init__.py +++ b/src/pydicom/pixels/__init__.py @@ -1,28 +1,19 @@ -from pydicom.pixels.decoders.base import ( - get_decoder, - ImplicitVRLittleEndianDecoder, - ExplicitVRLittleEndianDecoder, - ExplicitVRBigEndianDecoder, - DeflatedExplicitVRLittleEndianDecoder, - JPEGBaseline8BitDecoder, - JPEGExtended12BitDecoder, - JPEGLosslessDecoder, - JPEGLosslessSV1Decoder, - JPEGLSLosslessDecoder, - JPEGLSNearLosslessDecoder, - JPEG2000LosslessDecoder, - JPEG2000Decoder, - HTJ2KLosslessDecoder, - HTJ2KLosslessRPCLDecoder, - HTJ2KDecoder, - RLELosslessDecoder, +# Copyright 2008-2024 pydicom authors. See LICENSE file for details. 
+ +from pydicom.pixels.decoders.base import get_decoder +from pydicom.pixels.encoders.base import get_encoder +from pydicom.pixels.processing import ( + apply_color_lut, + apply_modality_lut, + apply_voi_lut, + apply_voi, + apply_windowing, + convert_color_space, ) -from pydicom.pixels.encoders.base import ( - get_encoder, - RLELosslessEncoder, - JPEGLSLosslessEncoder, - JPEGLSNearLosslessEncoder, - JPEG2000LosslessEncoder, - JPEG2000Encoder, +from pydicom.pixels.utils import ( + as_pixel_options, + iter_pixels, + pack_bits, + pixel_array, + unpack_bits, ) -from pydicom.pixels.utils import pixel_array, iter_pixels, as_pixel_options diff --git a/src/pydicom/pixels/common.py b/src/pydicom/pixels/common.py index 489395b453..6f46a7f015 100644 --- a/src/pydicom/pixels/common.py +++ b/src/pydicom/pixels/common.py @@ -6,11 +6,11 @@ from importlib import import_module from typing import TYPE_CHECKING, Any, TypedDict -from pydicom.dataset import Dataset from pydicom.pixels.utils import as_pixel_options from pydicom.uid import UID if TYPE_CHECKING: # pragma: no cover + from pydicom.dataset import Dataset from pydicom.pixels.decoders.base import DecodeOptions, DecodeFunction from pydicom.pixels.encoders.base import EncodeOptions, EncodeFunction diff --git a/src/pydicom/pixels/decoders/__init__.py b/src/pydicom/pixels/decoders/__init__.py index e69de29bb2..aabbe7c3dc 100644 --- a/src/pydicom/pixels/decoders/__init__.py +++ b/src/pydicom/pixels/decoders/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2008-2024 pydicom authors. See LICENSE file for details. + +from pydicom.pixels.decoders.base import ( + ImplicitVRLittleEndianDecoder, + ExplicitVRLittleEndianDecoder, + ExplicitVRBigEndianDecoder, + DeflatedExplicitVRLittleEndianDecoder, + JPEGBaseline8BitDecoder, + JPEGExtended12BitDecoder, + JPEGLosslessDecoder, + JPEGLosslessSV1Decoder, + JPEGLSLosslessDecoder, + JPEGLSNearLosslessDecoder, + JPEG2000LosslessDecoder, + JPEG2000Decoder, + HTJ2KLosslessDecoder, + HTJ2KLosslessRPCLDecoder, + HTJ2KDecoder, + RLELosslessDecoder, +) diff --git a/src/pydicom/pixels/decoders/base.py b/src/pydicom/pixels/decoders/base.py index 4f7e32006a..954aa6ff2a 100644 --- a/src/pydicom/pixels/decoders/base.py +++ b/src/pydicom/pixels/decoders/base.py @@ -4,7 +4,7 @@ from collections.abc import Callable, Iterator, Iterable import logging import sys -from typing import Any, BinaryIO, cast +from typing import Any, BinaryIO, cast, TYPE_CHECKING try: import numpy as np @@ -14,7 +14,6 @@ HAVE_NP = False from pydicom import config -from pydicom.dataset import Dataset from pydicom.encaps import get_frame, generate_frames from pydicom.misc import warn_and_log from pydicom.pixels.common import ( @@ -24,8 +23,11 @@ CoderBase, PhotometricInterpretation as PI, ) -from pydicom.pixels.utils import _get_jpg_parameters -from pydicom.pixel_data_handlers.util import convert_color_space, get_j2k_parameters +from pydicom.pixels.processing import convert_color_space +from pydicom.pixels.utils import ( + _get_jpg_parameters, + get_j2k_parameters, +) from pydicom.uid import ( ImplicitVRLittleEndian, ExplicitVRLittleEndian, @@ -49,6 +51,9 @@ JPEGTransferSyntaxes, ) +if TYPE_CHECKING: # pragma: no cover + from pydicom.dataset import Dataset + LOGGER = logging.getLogger(__name__) @@ -554,7 +559,7 @@ def _set_options_ds(self, ds: "Dataset") -> None: self.set_option("pixel_keyword", px_keyword[0]) self.set_option("pixel_vr", ds[px_keyword[0]].VR) - def set_source(self, src: Buffer | Dataset | BinaryIO) -> None: + def set_source(self, src: "Buffer | Dataset 
| BinaryIO") -> None: """Set the pixel data to be decoded. Parameters @@ -564,6 +569,8 @@ def set_source(self, src: Buffer | Dataset | BinaryIO) -> None: :class:`~pydicom.dataset.Dataset` containing the pixel data and associated group ``0x0028`` elements. """ + from pydicom.dataset import Dataset + if isinstance(src, Dataset): self._set_options_ds(src) self._src = src[self.pixel_keyword].value @@ -710,7 +717,7 @@ def __init__(self, uid: UID) -> None: def as_array( self, - src: Dataset | Buffer | BinaryIO, + src: "Dataset | Buffer | BinaryIO", *, index: int | None = None, validate: bool = True, @@ -1046,7 +1053,7 @@ def _as_array_native(runner: DecodeRunner, index: int | None) -> "np.ndarray": def as_buffer( self, - src: Dataset | Buffer | BinaryIO, + src: "Dataset | Buffer | BinaryIO", *, index: int | None = None, validate: bool = True, @@ -1275,7 +1282,7 @@ def _as_buffer_native(runner: DecodeRunner, index: int | None) -> Buffer: def iter_array( self, - src: Dataset | Buffer | BinaryIO, + src: "Dataset | Buffer | BinaryIO", *, indices: Iterable[int] | None = None, raw: bool = False, @@ -1433,7 +1440,7 @@ def iter_array( def iter_buffer( self, - src: Dataset | Buffer | BinaryIO, + src: "Dataset | Buffer | BinaryIO", *, indices: Iterable[int] | None = None, validate: bool = True, diff --git a/src/pydicom/pixels/encoders/__init__.py b/src/pydicom/pixels/encoders/__init__.py index e69de29bb2..dbde1a952b 100644 --- a/src/pydicom/pixels/encoders/__init__.py +++ b/src/pydicom/pixels/encoders/__init__.py @@ -0,0 +1,9 @@ +# Copyright 2008-2024 pydicom authors. See LICENSE file for details. + +from pydicom.pixels.encoders.base import ( + RLELosslessEncoder, + JPEGLSLosslessEncoder, + JPEGLSNearLosslessEncoder, + JPEG2000LosslessEncoder, + JPEG2000Encoder, +) diff --git a/src/pydicom/pixels/encoders/base.py b/src/pydicom/pixels/encoders/base.py index 984f78fc5f..e373a4c2b6 100644 --- a/src/pydicom/pixels/encoders/base.py +++ b/src/pydicom/pixels/encoders/base.py @@ -5,7 +5,7 @@ import logging import math import sys -from typing import Any, cast +from typing import Any, cast, TYPE_CHECKING try: import numpy as np @@ -13,7 +13,6 @@ pass from pydicom import config -from pydicom.dataset import Dataset from pydicom.pixels.common import Buffer, RunnerBase, CoderBase, RunnerOptions from pydicom.uid import ( UID, @@ -29,6 +28,9 @@ JPEGLSTransferSyntaxes, ) +if TYPE_CHECKING: # pragma: no cover + from pydicom.dataset import Dataset + LOGGER = logging.getLogger(__name__) @@ -260,6 +262,8 @@ def set_source(self, src: "np.ndarray | Dataset | Buffer") -> None: the pixel data and associated group ``0x0028`` elements. * If a :class:`numpy.ndarray` then an array containing the image data. """ + from pydicom.dataset import Dataset + if isinstance(src, Dataset): self._set_options_ds(src) self._src = src.PixelData diff --git a/src/pydicom/pixels/encoders/gdcm.py b/src/pydicom/pixels/encoders/gdcm.py index 7e86ed727b..86c72a5eee 100644 --- a/src/pydicom/pixels/encoders/gdcm.py +++ b/src/pydicom/pixels/encoders/gdcm.py @@ -1,3 +1,4 @@ +# Copyright 2008-2024 pydicom authors. See LICENSE file for details. """Interface for *Pixel Data* encoding, not intended to be used directly.""" from typing import cast diff --git a/src/pydicom/pixels/encoders/native.py b/src/pydicom/pixels/encoders/native.py index 3d3dd14f84..53e7e3dd76 100644 --- a/src/pydicom/pixels/encoders/native.py +++ b/src/pydicom/pixels/encoders/native.py @@ -1,4 +1,4 @@ -# Copyright 2008-2021 pydicom authors. See LICENSE file for details. 
+# Copyright 2008-2024 pydicom authors. See LICENSE file for details. """Interface for *Pixel Data* encoding, not intended to be used directly.""" from itertools import groupby diff --git a/src/pydicom/pixels/processing.py b/src/pydicom/pixels/processing.py new file mode 100644 index 0000000000..f18039724c --- /dev/null +++ b/src/pydicom/pixels/processing.py @@ -0,0 +1,888 @@ +# Copyright 2008-2024 pydicom authors. See LICENSE file for details. +"""Pixel data processing functions.""" + +from struct import unpack, unpack_from +from typing import TYPE_CHECKING, cast +from collections.abc import Iterable + +try: + import numpy as np + + HAVE_NP = True +except ImportError: + HAVE_NP = False + +from pydicom.data import get_palette_files +from pydicom.misc import warn_and_log +from pydicom.uid import UID +from pydicom.valuerep import VR + +if TYPE_CHECKING: # pragma: no cover + from pydicom.dataset import Dataset + + +def apply_color_lut( + arr: "np.ndarray", ds: "Dataset | None" = None, palette: str | UID | None = None +) -> "np.ndarray": + """Apply a color palette lookup table to `arr`. + + If (0028,1201-1203) *Palette Color Lookup Table Data* are missing + then (0028,1221-1223) *Segmented Palette Color Lookup Table Data* must be + present and vice versa. The presence of (0028,1204) *Alpha Palette Color + Lookup Table Data* or (0028,1224) *Alpha Segmented Palette Color Lookup + Table Data* is optional. + + Use of this function with the :dcm:`Enhanced Palette Color Lookup Table + Module` or :dcm:`Supplemental Palette Color LUT + Module` is not currently supported. + + Parameters + ---------- + arr : numpy.ndarray + The pixel data to apply the color palette to. + ds : dataset.Dataset, optional + Required if `palette` is not supplied. A + :class:`~pydicom.dataset.Dataset` containing a suitable + :dcm:`Image Pixel` or + :dcm:`Palette Color Lookup Table` Module. + palette : str or uid.UID, optional + Required if `ds` is not supplied. The name of one of the + :dcm:`well-known` color palettes defined by the + DICOM Standard. One of: ``'HOT_IRON'``, ``'PET'``, + ``'HOT_METAL_BLUE'``, ``'PET_20_STEP'``, ``'SPRING'``, ``'SUMMER'``, + ``'FALL'``, ``'WINTER'`` or the corresponding well-known (0008,0018) + *SOP Instance UID*. + + Returns + ------- + numpy.ndarray + The RGB or RGBA pixel data as an array of ``np.uint8`` or ``np.uint16`` + values, depending on the 3rd value of (0028,1201) *Red Palette Color + Lookup Table Descriptor*. 
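For instance, with ``palette.dcm`` standing in for a hypothetical PALETTE COLOR dataset, either the dataset's own LUTs or one of the well-known palettes can be applied:

.. code-block:: python

    >>> from pydicom import dcmread
    >>> from pydicom.pixels import apply_color_lut
    >>> ds = dcmread("palette.dcm")  # hypothetical PALETTE COLOR dataset
    >>> rgb = apply_color_lut(ds.pixel_array, ds)  # use the dataset's own LUTs
    >>> hot = apply_color_lut(ds.pixel_array, palette="HOT_IRON")  # well-known palette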
+
+    References
+    ----------
+
+    * :dcm:`Image Pixel Module`
+    * :dcm:`Supplemental Palette Color LUT Module`
+    * :dcm:`Enhanced Palette Color LUT Module`
+    * :dcm:`Palette Color LUT Module`
+    * :dcm:`Supplemental Palette Color LUTs
+      `
+    """
+    # Note: input value (IV) is the stored pixel value in `arr`
+    # LUTs[IV] -> [R, G, B] values at the IV pixel location in `arr`
+    if not ds and not palette:
+        raise ValueError("Either 'ds' or 'palette' is required")
+
+    if palette:
+        # Well-known palettes are all 8-bits per entry
+        datasets = {
+            "1.2.840.10008.1.5.1": "hotiron.dcm",
+            "1.2.840.10008.1.5.2": "pet.dcm",
+            "1.2.840.10008.1.5.3": "hotmetalblue.dcm",
+            "1.2.840.10008.1.5.4": "pet20step.dcm",
+            "1.2.840.10008.1.5.5": "spring.dcm",
+            "1.2.840.10008.1.5.6": "summer.dcm",
+            "1.2.840.10008.1.5.7": "fall.dcm",
+            "1.2.840.10008.1.5.8": "winter.dcm",
+        }
+        if not UID(palette).is_valid:
+            try:
+                # PS3.6: Fall is 1.2.840.10008.1.5.7, Winter is .8,
+                # matching the `datasets` mapping above
+                uids = {
+                    "HOT_IRON": "1.2.840.10008.1.5.1",
+                    "PET": "1.2.840.10008.1.5.2",
+                    "HOT_METAL_BLUE": "1.2.840.10008.1.5.3",
+                    "PET_20_STEP": "1.2.840.10008.1.5.4",
+                    "SPRING": "1.2.840.10008.1.5.5",
+                    "SUMMER": "1.2.840.10008.1.5.6",
+                    "FALL": "1.2.840.10008.1.5.7",
+                    "WINTER": "1.2.840.10008.1.5.8",
+                }
+                palette = uids[palette]
+            except KeyError:
+                raise ValueError(f"Unknown palette '{palette}'")
+
+        try:
+            from pydicom import dcmread
+
+            fname = datasets[palette]
+            ds = dcmread(get_palette_files(fname)[0])
+        except KeyError:
+            raise ValueError(f"Unknown palette '{palette}'")
+
+    ds = cast("Dataset", ds)
+
+    # C.8.16.2.1.1.1: Supplemental Palette Color LUT
+    # TODO: Requires greyscale visualisation pipeline
+    if getattr(ds, "PixelPresentation", None) in ["MIXED", "COLOR"]:
+        raise ValueError(
+            "Use of this function with the Supplemental Palette Color Lookup "
+            "Table Module is not currently supported"
+        )
+
+    if "RedPaletteColorLookupTableDescriptor" not in ds:
+        raise ValueError("No suitable Palette Color Lookup Table Module found")
+
+    # All channels are supposed to be identical
+    lut_desc = cast(list[int], ds.RedPaletteColorLookupTableDescriptor)
+    # A value of 0 = 2^16 entries
+    nr_entries = lut_desc[0] or 2**16
+
+    # May be negative if Pixel Representation is 1
+    first_map = lut_desc[1]
+    # Actual bit depth may be larger (8 bit entries in 16 bits allocated)
+    nominal_depth = lut_desc[2]
+    dtype = np.dtype(f"uint{nominal_depth:.0f}")
+
+    luts = []
+    if "RedPaletteColorLookupTableData" in ds:
+        # LUT Data is described by PS3.3, C.7.6.3.1.6
+        r_lut = cast(bytes, ds.RedPaletteColorLookupTableData)
+        g_lut = cast(bytes, ds.GreenPaletteColorLookupTableData)
+        b_lut = cast(bytes, ds.BluePaletteColorLookupTableData)
+        a_lut = cast(
+            bytes | None, getattr(ds, "AlphaPaletteColorLookupTableData", None)
+        )
+
+        actual_depth = len(r_lut) / nr_entries * 8
+        dtype = np.dtype(f"uint{actual_depth:.0f}")
+
+        for lut_bytes in [ii for ii in [r_lut, g_lut, b_lut, a_lut] if ii]:
+            luts.append(np.frombuffer(lut_bytes, dtype=dtype))
+    elif "SegmentedRedPaletteColorLookupTableData" in ds:
+        # Segmented LUT Data is described by PS3.3, C.7.9.2
+        r_lut = cast(bytes, ds.SegmentedRedPaletteColorLookupTableData)
+        g_lut = cast(bytes, ds.SegmentedGreenPaletteColorLookupTableData)
+        b_lut = cast(bytes, ds.SegmentedBluePaletteColorLookupTableData)
+        a_lut = cast(
+            bytes | None, getattr(ds, "SegmentedAlphaPaletteColorLookupTableData", None)
+        )
+
+        if hasattr(ds, "file_meta"):
+            is_little_endian = ds.file_meta._tsyntax_encoding[1]
+        else:
+            is_little_endian = ds.original_encoding[1]
+
+        if is_little_endian is None:
+            raise 
AttributeError( + "Unable to determine the endianness of the dataset, please set " + "an appropriate Transfer Syntax UID in " + f"'{type(ds).__name__}.file_meta'" + ) + + endianness = "><"[is_little_endian] + byte_depth = nominal_depth // 8 + fmt = "B" if byte_depth == 1 else "H" + actual_depth = nominal_depth + + for seg in [ii for ii in [r_lut, g_lut, b_lut, a_lut] if ii]: + len_seg = len(seg) // byte_depth + s_fmt = f"{endianness}{len_seg}{fmt}" + lut_ints = _expand_segmented_lut(unpack(s_fmt, seg), s_fmt) + luts.append(np.asarray(lut_ints, dtype=dtype)) + else: + raise ValueError("No suitable Palette Color Lookup Table Module found") + + if actual_depth not in [8, 16]: + raise ValueError( + f"The bit depth of the LUT data '{actual_depth:.1f}' " + "is invalid (only 8 or 16 bits per entry allowed)" + ) + + lut_lengths = [len(ii) for ii in luts] + if not all(ii == lut_lengths[0] for ii in lut_lengths[1:]): + raise ValueError("LUT data must be the same length") + + # IVs < `first_map` get set to first LUT entry (i.e. index 0) + clipped_iv = np.zeros(arr.shape, dtype=dtype) + # IVs >= `first_map` are mapped by the Palette Color LUTs + # `first_map` may be negative, positive or 0 + mapped_pixels = arr >= first_map + clipped_iv[mapped_pixels] = arr[mapped_pixels] - first_map + # IVs > number of entries get set to last entry + np.clip(clipped_iv, 0, nr_entries - 1, out=clipped_iv) + + # Output array may be RGB or RGBA + out = np.empty(list(arr.shape) + [len(luts)], dtype=dtype) + for ii, lut in enumerate(luts): + out[..., ii] = lut[clipped_iv] + + return out + + +def apply_modality_lut(arr: "np.ndarray", ds: "Dataset") -> "np.ndarray": + """Apply a modality lookup table or rescale operation to `arr`. + + Parameters + ---------- + arr : numpy.ndarray + The :class:`~numpy.ndarray` to apply the modality LUT or rescale + operation to. + ds : dataset.Dataset + A dataset containing a :dcm:`Modality LUT Module + `. + + Returns + ------- + numpy.ndarray + An array with applied modality LUT or rescale operation. If + (0028,3000) *Modality LUT Sequence* is present then returns an array + of ``np.uint8`` or ``np.uint16``, depending on the 3rd value of + (0028,3002) *LUT Descriptor*. If (0028,1052) *Rescale Intercept* and + (0028,1053) *Rescale Slope* are present then returns an array of + ``np.float64``. If neither are present then `arr` will be returned + unchanged. + + Notes + ----- + When *Rescale Slope* and *Rescale Intercept* are used, the output range + is from (min. pixel value * Rescale Slope + Rescale Intercept) to + (max. pixel value * Rescale Slope + Rescale Intercept), where min. and + max. pixel value are determined from (0028,0101) *Bits Stored* and + (0028,0103) *Pixel Representation*. 
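As a concrete sketch of the rescale branch, using hypothetical CT-style values (*Rescale Slope* ``1``, *Rescale Intercept* ``-1024``):

.. code-block:: python

    >>> import numpy as np
    >>> from pydicom.dataset import Dataset
    >>> from pydicom.pixels import apply_modality_lut
    >>> ds = Dataset()
    >>> ds.RescaleSlope = 1
    >>> ds.RescaleIntercept = -1024
    >>> arr = np.asarray([0, 1024, 2048], dtype="uint16")
    >>> apply_modality_lut(arr, ds).tolist()
    [-1024.0, 0.0, 1024.0]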
+ + References + ---------- + * DICOM Standard, Part 3, :dcm:`Annex C.11.1 + ` + * DICOM Standard, Part 4, :dcm:`Annex N.2.1.1 + ` + """ + if ds.get("ModalityLUTSequence"): + item = cast(list["Dataset"], ds.ModalityLUTSequence)[0] + nr_entries = cast(list[int], item.LUTDescriptor)[0] or 2**16 + first_map = cast(list[int], item.LUTDescriptor)[1] + nominal_depth = cast(list[int], item.LUTDescriptor)[2] + + dtype = f"uint{nominal_depth}" + + # Ambiguous VR, US or OW + unc_data: Iterable[int] + if item["LUTData"].VR == VR.OW: + if hasattr(ds, "file_meta"): + is_little_endian = ds.file_meta._tsyntax_encoding[1] + else: + is_little_endian = ds.original_encoding[1] + + if is_little_endian is None: + raise AttributeError( + "Unable to determine the endianness of the dataset, please set " + "an appropriate Transfer Syntax UID in " + f"'{type(ds).__name__}.file_meta'" + ) + + endianness = "><"[is_little_endian] + unpack_fmt = f"{endianness}{nr_entries}H" + unc_data = unpack(unpack_fmt, cast(bytes, item.LUTData)) + else: + unc_data = cast(list[int], item.LUTData) + + lut_data: np.ndarray = np.asarray(unc_data, dtype=dtype) + + # IVs < `first_map` get set to first LUT entry (i.e. index 0) + clipped_iv = np.zeros(arr.shape, dtype=arr.dtype) + # IVs >= `first_map` are mapped by the Modality LUT + # `first_map` may be negative, positive or 0 + mapped_pixels = arr >= first_map + clipped_iv[mapped_pixels] = arr[mapped_pixels] - first_map + # IVs > number of entries get set to last entry + np.clip(clipped_iv, 0, nr_entries - 1, out=clipped_iv) + + return lut_data[clipped_iv] + elif "RescaleSlope" in ds and "RescaleIntercept" in ds: + arr = arr.astype(np.float64) * cast(float, ds.RescaleSlope) + arr += cast(float, ds.RescaleIntercept) + + return arr + + +apply_rescale = apply_modality_lut + + +def apply_voi_lut( + arr: "np.ndarray", ds: "Dataset", index: int = 0, prefer_lut: bool = True +) -> "np.ndarray": + """Apply a VOI lookup table or windowing operation to `arr`. + + .. versionchanged:: 2.1 + + Added the `prefer_lut` keyword parameter + + Parameters + ---------- + arr : numpy.ndarray + The :class:`~numpy.ndarray` to apply the VOI LUT or windowing operation + to. + ds : dataset.Dataset + A dataset containing a :dcm:`VOI LUT Module`. + If (0028,3010) *VOI LUT Sequence* is present then returns an array + of ``np.uint8`` or ``np.uint16``, depending on the 3rd value of + (0028,3002) *LUT Descriptor*. If (0028,1050) *Window Center* and + (0028,1051) *Window Width* are present then returns an array of + ``np.float64``. If neither are present then `arr` will be returned + unchanged. + index : int, optional + When the VOI LUT Module contains multiple alternative views, this is + the index of the view to return (default ``0``). + prefer_lut : bool + When the VOI LUT Module contains both *Window Width*/*Window Center* + and *VOI LUT Sequence*, if ``True`` (default) then apply the VOI LUT, + otherwise apply the windowing operation. + + Returns + ------- + numpy.ndarray + An array with applied VOI LUT or windowing operation. + + Notes + ----- + When the dataset requires a modality LUT or rescale operation as part of + the Modality LUT module then that must be applied before any windowing + operation. 
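A short sketch of the selection logic when a hypothetical dataset ``ds`` carries both *Window Center*/*Window Width* and a *VOI LUT Sequence*:

.. code-block:: python

    >>> from pydicom.pixels import apply_voi_lut
    >>> lut_out = apply_voi_lut(arr, ds)  # default: the VOI LUT is applied
    >>> win_out = apply_voi_lut(arr, ds, prefer_lut=False)  # force windowing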
+ + See Also + -------- + :func:`~pydicom.pixels.processing.apply_modality_lut` + :func:`~pydicom.pixels.processing.apply_voi` + :func:`~pydicom.pixels.processing.apply_windowing` + + References + ---------- + * DICOM Standard, Part 3, :dcm:`Annex C.11.2 + ` + * DICOM Standard, Part 3, :dcm:`Annex C.8.11.3.1.5 + ` + * DICOM Standard, Part 4, :dcm:`Annex N.2.1.1 + ` + """ + valid_voi = False + if ds.get("VOILUTSequence"): + ds.VOILUTSequence = cast(list["Dataset"], ds.VOILUTSequence) + valid_voi = None not in [ + ds.VOILUTSequence[0].get("LUTDescriptor", None), + ds.VOILUTSequence[0].get("LUTData", None), + ] + valid_windowing = None not in [ + ds.get("WindowCenter", None), + ds.get("WindowWidth", None), + ] + + if valid_voi and valid_windowing: + if prefer_lut: + return apply_voi(arr, ds, index) + + return apply_windowing(arr, ds, index) + + if valid_voi: + return apply_voi(arr, ds, index) + + if valid_windowing: + return apply_windowing(arr, ds, index) + + return arr + + +def apply_voi(arr: "np.ndarray", ds: "Dataset", index: int = 0) -> "np.ndarray": + """Apply a VOI lookup table to `arr`. + + .. versionadded:: 2.1 + + Parameters + ---------- + arr : numpy.ndarray + The :class:`~numpy.ndarray` to apply the VOI LUT to. + ds : dataset.Dataset + A dataset containing a :dcm:`VOI LUT Module`. + If (0028,3010) *VOI LUT Sequence* is present then returns an array + of ``np.uint8`` or ``np.uint16``, depending on the 3rd value of + (0028,3002) *LUT Descriptor*, otherwise `arr` will be returned + unchanged. + index : int, optional + When the VOI LUT Module contains multiple alternative views, this is + the index of the view to return (default ``0``). + + Returns + ------- + numpy.ndarray + An array with applied VOI LUT. + + See Also + -------- + :func:`~pydicom.pixels.processing.apply_modality_lut` + :func:`~pydicom.pixels.processing.apply_windowing` + + References + ---------- + * DICOM Standard, Part 3, :dcm:`Annex C.11.2 + ` + * DICOM Standard, Part 3, :dcm:`Annex C.8.11.3.1.5 + ` + * DICOM Standard, Part 4, :dcm:`Annex N.2.1.1 + ` + """ + if not ds.get("VOILUTSequence"): + return arr + + if not np.issubdtype(arr.dtype, np.integer): + warn_and_log( + "Applying a VOI LUT on a float input array may give incorrect results" + ) + + # VOI LUT Sequence contains one or more items + item = cast(list["Dataset"], ds.VOILUTSequence)[index] + lut_descriptor = cast(list[int], item.LUTDescriptor) + nr_entries = lut_descriptor[0] or 2**16 + first_map = lut_descriptor[1] + + # PS3.3 C.8.11.3.1.5: may be 8, 10-16 + nominal_depth = lut_descriptor[2] + if nominal_depth in list(range(10, 17)): + dtype = "uint16" + elif nominal_depth == 8: + dtype = "uint8" + else: + raise NotImplementedError( + f"'{nominal_depth}' bits per LUT entry is not supported" + ) + + # Ambiguous VR, US or OW + unc_data: Iterable[int] + if item["LUTData"].VR == VR.OW: + if hasattr(ds, "file_meta"): + is_little_endian = ds.file_meta._tsyntax_encoding[1] + else: + is_little_endian = ds.original_encoding[1] + + if is_little_endian is None: + raise AttributeError( + "Unable to determine the endianness of the dataset, please set " + "an appropriate Transfer Syntax UID in " + f"'{type(ds).__name__}.file_meta'" + ) + + unpack_fmt = f"{'><'[is_little_endian]}{nr_entries}H" + unc_data = unpack_from(unpack_fmt, cast(bytes, item.LUTData)) + else: + unc_data = cast(list[int], item.LUTData) + + lut_data: np.ndarray = np.asarray(unc_data, dtype=dtype) + + # IVs < `first_map` get set to first LUT entry (i.e. 
index 0)
+    clipped_iv = np.zeros(arr.shape, dtype=dtype)
+    # IVs >= `first_map` are mapped by the VOI LUT
+    # `first_map` may be negative, positive or 0
+    mapped_pixels = arr >= first_map
+    clipped_iv[mapped_pixels] = arr[mapped_pixels] - first_map
+    # IVs > number of entries get set to last entry
+    np.clip(clipped_iv, 0, nr_entries - 1, out=clipped_iv)
+
+    return cast("np.ndarray", lut_data[clipped_iv])
+
+
+def apply_windowing(arr: "np.ndarray", ds: "Dataset", index: int = 0) -> "np.ndarray":
+    """Apply a windowing operation to `arr`.
+
+    .. versionadded:: 2.1
+
+    Parameters
+    ----------
+    arr : numpy.ndarray
+        The :class:`~numpy.ndarray` to apply the windowing operation to.
+    ds : dataset.Dataset
+        A dataset containing a :dcm:`VOI LUT Module`.
+        If (0028,1050) *Window Center* and (0028,1051) *Window Width* are
+        present then returns an array of ``np.float64``, otherwise `arr` will
+        be returned unchanged.
+    index : int, optional
+        When the VOI LUT Module contains multiple alternative views, this is
+        the index of the view to return (default ``0``).
+
+    Returns
+    -------
+    numpy.ndarray
+        An array with applied windowing operation.
+
+    Notes
+    -----
+    When the dataset requires a modality LUT or rescale operation as part of
+    the Modality LUT module then that must be applied before any windowing
+    operation.
+
+    See Also
+    --------
+    :func:`~pydicom.pixels.processing.apply_modality_lut`
+    :func:`~pydicom.pixels.processing.apply_voi`
+
+    References
+    ----------
+    * DICOM Standard, Part 3, :dcm:`Annex C.11.2
+      `
+    * DICOM Standard, Part 3, :dcm:`Annex C.8.11.3.1.5
+      `
+    * DICOM Standard, Part 4, :dcm:`Annex N.2.1.1
+      `
+    """
+    if "WindowWidth" not in ds and "WindowCenter" not in ds:
+        return arr
+
+    if ds.PhotometricInterpretation not in ["MONOCHROME1", "MONOCHROME2"]:
+        raise ValueError(
+            "When performing a windowing operation only 'MONOCHROME1' and "
+            "'MONOCHROME2' are allowed for (0028,0004) Photometric "
+            "Interpretation"
+        )
+
+    # May be LINEAR (default), LINEAR_EXACT, SIGMOID or not present, VM 1
+    voi_func = cast(str, getattr(ds, "VOILUTFunction", "LINEAR")).upper()
+    # VR DS, VM 1-n
+    elem = ds["WindowCenter"]
+    center = cast(list[float], elem.value)[index] if elem.VM > 1 else elem.value
+    center = cast(float, center)
+    elem = ds["WindowWidth"]
+    width = cast(list[float], elem.value)[index] if elem.VM > 1 else elem.value
+    width = cast(float, width)
+
+    # The output range depends on whether or not a modality LUT or rescale
+    # operation has been applied
+    ds.BitsStored = cast(int, ds.BitsStored)
+    y_min: float
+    y_max: float
+    if ds.get("ModalityLUTSequence"):
+        # Unsigned - see PS3.3 C.11.1.1.1
+        y_min = 0
+        item = cast(list["Dataset"], ds.ModalityLUTSequence)[0]
+        bit_depth = cast(list[int], item.LUTDescriptor)[2]
+        y_max = 2**bit_depth - 1
+    elif ds.PixelRepresentation == 0:
+        # Unsigned
+        y_min = 0
+        y_max = 2**ds.BitsStored - 1
+    else:
+        # Signed
+        y_min = -(2 ** (ds.BitsStored - 1))
+        y_max = 2 ** (ds.BitsStored - 1) - 1
+
+    slope = ds.get("RescaleSlope", None)
+    intercept = ds.get("RescaleIntercept", None)
+    if slope is not None and intercept is not None:
+        ds.RescaleSlope = cast(float, ds.RescaleSlope)
+        ds.RescaleIntercept = cast(float, ds.RescaleIntercept)
+        # Otherwise it's the actual data range
+        y_min = y_min * ds.RescaleSlope + ds.RescaleIntercept
+        y_max = y_max * ds.RescaleSlope + ds.RescaleIntercept
+
+    y_range = y_max - y_min
+    arr = arr.astype("float64")
+
+    if voi_func in ["LINEAR", "LINEAR_EXACT"]:
+        # PS3.3 C.11.2.1.2.1 and C.11.2.1.3.2
+        if voi_func == "LINEAR":
+            if width < 1:
+                raise ValueError(
+                    "The (0028,1051) Window Width must be greater than or "
+                    "equal to 1 for a 'LINEAR' windowing operation"
+                )
+            center -= 0.5
+            width -= 1
+        elif width <= 0:
+            raise ValueError(
+                "The (0028,1051) Window Width must be greater than 0 "
+                "for a 'LINEAR_EXACT' windowing operation"
+            )
+
+        below = arr <= (center - width / 2)
+        above = arr > (center + width / 2)
+        between = np.logical_and(~below, ~above)
+
+        arr[below] = y_min
+        arr[above] = y_max
+        if between.any():
+            arr[between] = ((arr[between] - center) / width + 0.5) * y_range + y_min
+    elif voi_func == "SIGMOID":
+        # PS3.3 C.11.2.1.3.1
+        if width <= 0:
+            raise ValueError(
+                "The (0028,1051) Window Width must be greater than 0 "
+                "for a 'SIGMOID' windowing operation"
+            )
+
+        arr = y_range / (1 + np.exp(-4 * (arr - center) / width)) + y_min
+    else:
+        raise ValueError(f"Unsupported (0028,1056) VOI LUT Function value '{voi_func}'")
+
+    return arr
+
+
+def convert_color_space(
+    arr: "np.ndarray", current: str, desired: str, per_frame: bool = False
+) -> "np.ndarray":
+    """Convert the image(s) in `arr` from one color space to another.
+
+    .. versionchanged:: 2.2
+
+        Added `per_frame` keyword parameter.
+
+    Parameters
+    ----------
+    arr : numpy.ndarray
+        The image(s) as a :class:`numpy.ndarray` with
+        :attr:`~numpy.ndarray.shape` (frames, rows, columns, 3)
+        or (rows, columns, 3).
+    current : str
+        The current color space, should be a valid value for (0028,0004)
+        *Photometric Interpretation*. One of ``'RGB'``, ``'YBR_FULL'``,
+        ``'YBR_FULL_422'``.
+    desired : str
+        The desired color space, should be a valid value for (0028,0004)
+        *Photometric Interpretation*. One of ``'RGB'``, ``'YBR_FULL'``,
+        ``'YBR_FULL_422'``.
+    per_frame : bool, optional
+        If ``True`` and the input array contains multiple frames then process
+        each frame individually to reduce memory usage. Default ``False``.
+
+    Returns
+    -------
+    numpy.ndarray
+        The image(s) converted to the desired color space.
+
+    References
+    ----------
+
+    * DICOM Standard, Part 3,
+      :dcm:`Annex C.7.6.3.1.2`
+    * ISO/IEC 10918-5:2012 (`ITU T.871
+      `_),
+      Section 7
+    """
+
+    def _no_change(arr: "np.ndarray") -> "np.ndarray":
+        return arr
+
+    _converters = {
+        "YBR_FULL_422": {
+            "YBR_FULL_422": _no_change,
+            "YBR_FULL": _no_change,
+            "RGB": _convert_YBR_FULL_to_RGB,
+        },
+        "YBR_FULL": {
+            "YBR_FULL": _no_change,
+            "YBR_FULL_422": _no_change,
+            "RGB": _convert_YBR_FULL_to_RGB,
+        },
+        "RGB": {
+            "RGB": _no_change,
+            "YBR_FULL": _convert_RGB_to_YBR_FULL,
+            "YBR_FULL_422": _convert_RGB_to_YBR_FULL,
+        },
+    }
+    try:
+        converter = _converters[current][desired]
+    except KeyError:
+        raise NotImplementedError(
+            f"Conversion from {current} to {desired} is not supported."
+        )
+
+    if len(arr.shape) == 4 and per_frame:
+        for idx, frame in enumerate(arr):
+            arr[idx] = converter(frame)
+
+        return arr
+
+    return converter(arr)
+
+
+def _convert_RGB_to_YBR_FULL(arr: "np.ndarray") -> "np.ndarray":
+    """Return an ndarray converted from RGB to YBR_FULL color space.
+
+    Parameters
+    ----------
+    arr : numpy.ndarray
+        An ndarray of one or more 8-bit per channel images in RGB color
+        space.
+
+    Returns
+    -------
+    numpy.ndarray
+        The array in YBR_FULL color space.
+
+    References
+    ----------
+
+    * DICOM Standard, Part 3,
+      :dcm:`Annex C.7.6.3.1.2`
+    * ISO/IEC 10918-5:2012 (`ITU T.871
+      `_),
+      Section 7
+    """
+    orig_dtype = arr.dtype
+
+    rgb_to_ybr = np.asarray(
+        [
+            [+0.299, -0.299 / 1.772, +0.701 / 1.402],
+            [+0.587, -0.587 / 1.772, -0.587 / 1.402],
+            [+0.114, +0.886 / 1.772, -0.114 / 1.402],
+        ],
+        dtype=np.float32,
+    )
+
+    arr = np.matmul(arr, rgb_to_ybr, dtype=np.float32)
+    arr += [0.5, 128.5, 128.5]
+    # Round(x) -> floor of (arr + 0.5) : 0.5 added in previous step
+    np.floor(arr, out=arr)
+    # Max(0, arr) -> 0 if 0 >= arr, arr otherwise
+    # Min(arr, 255) -> arr if arr <= 255, 255 otherwise
+    np.clip(arr, 0, 255, out=arr)
+
+    return arr.astype(orig_dtype)
+
+
+def _convert_YBR_FULL_to_RGB(arr: "np.ndarray") -> "np.ndarray":
+    """Return an ndarray converted from YBR_FULL to RGB color space.
+
+    Parameters
+    ----------
+    arr : numpy.ndarray
+        An ndarray of one or more 8-bit per channel images in YBR_FULL color
+        space.
+
+    Returns
+    -------
+    numpy.ndarray
+        The array in RGB color space.
+
+    References
+    ----------
+
+    * DICOM Standard, Part 3,
+      :dcm:`Annex C.7.6.3.1.2`
+    * ISO/IEC 10918-5:2012, Section 7
+    """
+    orig_dtype = arr.dtype
+
+    ybr_to_rgb = np.asarray(
+        [
+            [1.000, 1.000, 1.000],
+            [0.000, -0.114 * 1.772 / 0.587, 1.772],
+            [1.402, -0.299 * 1.402 / 0.587, 0.000],
+        ],
+        dtype=np.float32,
+    )
+
+    arr = arr.astype(np.float32)
+    arr -= [0, 128, 128]
+
+    np.matmul(arr, ybr_to_rgb, out=arr)
+    # Round(x) -> floor of (arr + 0.5)
+    arr += 0.5
+    np.floor(arr, out=arr)
+    # Max(0, arr) -> 0 if 0 >= arr, arr otherwise
+    # Min(arr, 255) -> arr if arr <= 255, 255 otherwise
+    np.clip(arr, 0, 255, out=arr)
+
+    return arr.astype(orig_dtype)
+
+
+def _expand_segmented_lut(
+    data: tuple[int, ...],
+    fmt: str,
+    nr_segments: int | None = None,
+    last_value: int | None = None,
+) -> list[int]:
+    """Return a list containing the expanded lookup table data.
+
+    Parameters
+    ----------
+    data : tuple of int
+        The decoded segmented palette lookup table data. May be padded by a
+        trailing null.
+    fmt : str
+        The format of the data, should contain `'B'` for 8-bit, `'H'` for
+        16-bit, `'<'` for little endian and `'>'` for big endian.
+    nr_segments : int, optional
+        Expand at most `nr_segments` from the data. Should be used when
+        the opcode is ``2`` (indirect). If used then `last_value` should also
+        be used.
+    last_value : int, optional
+        The previous value in the expanded lookup table. Should be used when
+        the opcode is ``2`` (indirect). If used then `nr_segments` should also
+        be used.
+
+    Returns
+    -------
+    list of int
+        The reconstructed lookup table data.
+
+    References
+    ----------
+
+    * DICOM Standard, Part 3, Annex C.7.9
+    """
+    # Indirect segment byte offset is dependent on endianness for 8-bit
+    # Little endian: e.g. 0x0302 0x0100, big endian, e.g.
0x0203 0x0001 + indirect_ii = [3, 2, 1, 0] if "<" in fmt else [2, 3, 0, 1] + + lut: list[int] = [] + offset = 0 + segments_read = 0 + # Use `offset + 1` to account for possible trailing null + # can do this because all segment types are longer than 2 + while offset + 1 < len(data): + opcode = data[offset] + length = data[offset + 1] + offset += 2 + + if opcode == 0: + # C.7.9.2.1: Discrete segment + lut.extend(data[offset : offset + length]) + offset += length + elif opcode == 1: + # C.7.9.2.2: Linear segment + if lut: + y0 = lut[-1] + elif last_value: + # Indirect segment with linear segment at 0th offset + y0 = last_value + else: + raise ValueError( + "Error expanding a segmented palette color lookup table: " + "the first segment cannot be a linear segment" + ) + + y1 = data[offset] + offset += 1 + + if y0 == y1: + lut.extend([y1] * length) + else: + step = (y1 - y0) / length + vals = np.around(np.linspace(y0 + step, y1, length)) + lut.extend([int(vv) for vv in vals]) + elif opcode == 2: + # C.7.9.2.3: Indirect segment + if not lut: + raise ValueError( + "Error expanding a segmented palette color lookup table: " + "the first segment cannot be an indirect segment" + ) + + if "B" in fmt: + # 8-bit segment entries + ii = [data[offset + vv] for vv in indirect_ii] + byte_offset = (ii[0] << 8 | ii[1]) << 16 | (ii[2] << 8 | ii[3]) + offset += 4 + else: + # 16-bit segment entries + byte_offset = data[offset + 1] << 16 | data[offset] + offset += 2 + + lut.extend(_expand_segmented_lut(data[byte_offset:], fmt, length, lut[-1])) + else: + raise ValueError( + "Error expanding a segmented palette lookup table: " + f"unknown segment type '{opcode}'" + ) + + segments_read += 1 + if segments_read == nr_segments: + return lut + + return lut diff --git a/src/pydicom/pixels/utils.py b/src/pydicom/pixels/utils.py index f4c04134f3..939b03631d 100644 --- a/src/pydicom/pixels/utils.py +++ b/src/pydicom/pixels/utils.py @@ -1,13 +1,14 @@ # Copyright 2008-2024 pydicom authors. See LICENSE file for details. """Utilities for pixel data handling.""" -from collections.abc import Iterable, Iterator +from collections.abc import Iterable, Iterator, ByteString import importlib import logging from pathlib import Path from os import PathLike from struct import unpack, Struct -from typing import BinaryIO, Any, cast +from sys import byteorder +from typing import BinaryIO, Any, cast, TYPE_CHECKING try: import numpy as np @@ -17,17 +18,15 @@ HAVE_NP = False from pydicom.charset import default_encoding -from pydicom.dataset import Dataset from pydicom._dicom_dict import DicomDictionary -from pydicom.filereader import ( - read_preamble, - _read_file_meta_info, - read_dataset, - _at_pixel_data, -) + +from pydicom.misc import warn_and_log from pydicom.tag import BaseTag from pydicom.uid import UID +if TYPE_CHECKING: # pragma: no cover + from pydicom.dataset import Dataset + LOGGER = logging.getLogger(__name__) @@ -54,6 +53,10 @@ (0x7FE0, 0x0009): "DoubleFloatPixelData", (0x7FE0, 0x0010): "PixelData", } +# Lookup table for unpacking bit-packed data +_UNPACK_LUT: dict[int, bytes] = { + k: bytes(int(s) for s in reversed(f"{k:08b}")) for k in range(256) +} # JPEG/JPEG-LS SOF markers _SOF = { @@ -79,7 +82,7 @@ def _array_common( f: BinaryIO, specific_tags: list[BaseTag | int], **kwargs: Any -) -> tuple[Dataset, dict[str, Any]]: +) -> tuple["Dataset", dict[str, Any]]: """Return a dataset from `f` and a corresponding decoding options dict. 
Parameters @@ -102,6 +105,12 @@ def _array_common( * The required and optional arguments for the pixel data decoding functions. """ + from pydicom.filereader import ( + read_preamble, + _read_file_meta_info, + read_dataset, + _at_pixel_data, + ) # Read preamble (if present) read_preamble(f, force=True) @@ -161,7 +170,7 @@ def _array_common( return ds, opts -def as_pixel_options(ds: Dataset, **kwargs: Any) -> dict[str, Any]: +def as_pixel_options(ds: "Dataset", **kwargs: Any) -> dict[str, Any]: """Return a dict containing the image pixel element values from `ds`. Parameters @@ -214,6 +223,217 @@ def as_pixel_options(ds: Dataset, **kwargs: Any) -> dict[str, Any]: return opts +def expand_ybr422(src: ByteString, bits_allocated: int) -> bytes: + """Return ``YBR_FULL_422`` data expanded to ``YBR_FULL``. + + Uncompressed datasets with a (0028,0004) *Photometric Interpretation* of + ``"YBR_FULL_422"`` are subsampled in the horizontal direction by halving + the number of Cb and Cr pixels (i.e. there are two Y pixels for every Cb + and Cr pixel). This function expands the ``YBR_FULL_422`` data to remove + the subsampling and the output is therefore ``YBR_FULL``. + + Parameters + ---------- + src : bytes or bytearray + The YBR_FULL_422 pixel data to be expanded. + bits_allocated : int + The number of bits used to store each pixel, as given by (0028,0100) + *Bits Allocated*. + + Returns + ------- + bytes + The expanded data (as YBR_FULL). + """ + # YBR_FULL_422 is Y Y Cb Cr (i.e. 2 Y pixels for every Cb and Cr pixel) + n_bytes = bits_allocated // 8 + length = len(src) // 2 * 3 + dst = bytearray(length) + + step_src = n_bytes * 4 + step_dst = n_bytes * 6 + for ii in range(n_bytes): + c_b = src[2 * n_bytes + ii :: step_src] + c_r = src[3 * n_bytes + ii :: step_src] + + dst[0 * n_bytes + ii :: step_dst] = src[0 * n_bytes + ii :: step_src] + dst[1 * n_bytes + ii :: step_dst] = c_b + dst[2 * n_bytes + ii :: step_dst] = c_r + + dst[3 * n_bytes + ii :: step_dst] = src[1 * n_bytes + ii :: step_src] + dst[4 * n_bytes + ii :: step_dst] = c_b + dst[5 * n_bytes + ii :: step_dst] = c_r + + return bytes(dst) + + +def get_expected_length(ds: "Dataset", unit: str = "bytes") -> int: + """Return the expected length (in terms of bytes or pixels) of the *Pixel + Data*. + + +------------------------------------------------+-------------+ + | Element | Required or | + +-------------+---------------------------+------+ optional | + | Tag | Keyword | Type | | + +=============+===========================+======+=============+ + | (0028,0002) | SamplesPerPixel | 1 | Required | + +-------------+---------------------------+------+-------------+ + | (0028,0004) | PhotometricInterpretation | 1 | Required | + +-------------+---------------------------+------+-------------+ + | (0028,0008) | NumberOfFrames | 1C | Optional | + +-------------+---------------------------+------+-------------+ + | (0028,0010) | Rows | 1 | Required | + +-------------+---------------------------+------+-------------+ + | (0028,0011) | Columns | 1 | Required | + +-------------+---------------------------+------+-------------+ + | (0028,0100) | BitsAllocated | 1 | Required | + +-------------+---------------------------+------+-------------+ + + Parameters + ---------- + ds : Dataset + The :class:`~pydicom.dataset.Dataset` containing the Image Pixel module + and *Pixel Data*. + unit : str, optional + If ``'bytes'`` then returns the expected length of the *Pixel Data* in + whole bytes and NOT including an odd length trailing NULL padding + byte. 
If ``'pixels'`` then returns the expected length of the *Pixel
+        Data* in terms of the total number of pixels (default ``'bytes'``).
+
+    Returns
+    -------
+    int
+        The expected length of the *Pixel Data* in either whole bytes or
+        pixels, excluding the NULL trailing padding byte for odd length data.
+    """
+    rows = cast(int, ds.Rows)
+    columns = cast(int, ds.Columns)
+    samples_per_pixel = cast(int, ds.SamplesPerPixel)
+    bits_allocated = cast(int, ds.BitsAllocated)
+
+    length = rows * columns * samples_per_pixel
+    length *= get_nr_frames(ds)
+
+    if unit == "pixels":
+        return length
+
+    # Correct for the number of bytes per pixel
+    if bits_allocated == 1:
+        # Determine the nearest whole number of bytes needed to contain
+        #   1-bit pixel data. e.g. 10 x 10 1-bit pixels is 100 bits, which
+        #   are packed into 12.5 -> 13 bytes
+        length = length // 8 + (length % 8 > 0)
+    else:
+        length *= bits_allocated // 8
+
+    # DICOM Standard, Part 3, Annex C.7.6.3.1.2
+    if ds.PhotometricInterpretation == "YBR_FULL_422":
+        length = length // 3 * 2
+
+    return length
+
+
+def get_image_pixel_ids(ds: "Dataset") -> dict[str, int]:
+    """Return a dict of the pixel data affecting element's :func:`id` values.
+
+    +------------------------------------------------+
+    | Element                                        |
+    +-------------+---------------------------+------+
+    | Tag         | Keyword                   | Type |
+    +=============+===========================+======+
+    | (0028,0002) | SamplesPerPixel           | 1    |
+    +-------------+---------------------------+------+
+    | (0028,0004) | PhotometricInterpretation | 1    |
+    +-------------+---------------------------+------+
+    | (0028,0006) | PlanarConfiguration       | 1C   |
+    +-------------+---------------------------+------+
+    | (0028,0008) | NumberOfFrames            | 1C   |
+    +-------------+---------------------------+------+
+    | (0028,0010) | Rows                      | 1    |
+    +-------------+---------------------------+------+
+    | (0028,0011) | Columns                   | 1    |
+    +-------------+---------------------------+------+
+    | (0028,0100) | BitsAllocated             | 1    |
+    +-------------+---------------------------+------+
+    | (0028,0101) | BitsStored                | 1    |
+    +-------------+---------------------------+------+
+    | (0028,0103) | PixelRepresentation       | 1    |
+    +-------------+---------------------------+------+
+    | (7FE0,0008) | FloatPixelData            | 1C   |
+    +-------------+---------------------------+------+
+    | (7FE0,0009) | DoubleFloatPixelData      | 1C   |
+    +-------------+---------------------------+------+
+    | (7FE0,0010) | PixelData                 | 1C   |
+    +-------------+---------------------------+------+
+
+    Parameters
+    ----------
+    ds : Dataset
+        The :class:`~pydicom.dataset.Dataset` containing the pixel data.
+
+    Returns
+    -------
+    dict
+        A dict containing the :func:`id` values for the elements that affect
+        the pixel data.
+
+    """
+    keywords = [
+        "SamplesPerPixel",
+        "PhotometricInterpretation",
+        "PlanarConfiguration",
+        "NumberOfFrames",
+        "Rows",
+        "Columns",
+        "BitsAllocated",
+        "BitsStored",
+        "PixelRepresentation",
+        "FloatPixelData",
+        "DoubleFloatPixelData",
+        "PixelData",
+    ]
+
+    return {kw: id(getattr(ds, kw, None)) for kw in keywords}
+
+
+def get_j2k_parameters(codestream: bytes) -> dict[str, object]:
+    """Return a dict containing JPEG 2000 component parameters.
+
+    .. versionadded:: 2.1
+
+    Parameters
+    ----------
+    codestream : bytes
+        The JPEG 2000 (ISO/IEC 15444-1) codestream to be parsed.
+
+    Returns
+    -------
+    dict
+        A dict containing parameters for the first component sample in the
+        JPEG 2000 `codestream`, or an empty dict if unable to parse the data.
+        Available parameters are ``{"precision": int, "is_signed": bool}``.
+    """
+    try:
+        # First 2 bytes must be the SOC marker - if not then wrong format
+        if codestream[0:2] != b"\xff\x4f":
+            return {}
+
+        # SIZ is required to be the second marker - Figure A-3 in 15444-1
+        if codestream[2:4] != b"\xff\x51":
+            return {}
+
+        # See 15444-1 A.5.1 for format of the SIZ box and contents
+        ssiz = codestream[42]
+        if ssiz & 0x80:
+            return {"precision": (ssiz & 0x7F) + 1, "is_signed": True}
+
+        return {"precision": ssiz + 1, "is_signed": False}
+    except (IndexError, TypeError):
+        pass
+
+    return {}
+
+
 def _get_jpg_parameters(src: bytes) -> dict[str, Any]:
     """Return a dict containing JPEG or JPEG-LS encoding parameters.
 
@@ -317,10 +537,41 @@
     return info
 
 
+def get_nr_frames(ds: "Dataset", warn: bool = True) -> int:
+    """Return NumberOfFrames or 1 if NumberOfFrames is None or 0.
+
+    Parameters
+    ----------
+    ds : dataset.Dataset
+        The :class:`~pydicom.dataset.Dataset` containing the Image Pixel
+        module.
+    warn : bool, optional
+        If ``True`` (the default), a warning is issued if NumberOfFrames
+        has an invalid value.
+
+    Returns
+    -------
+    int
+        An integer for the NumberOfFrames or 1 if NumberOfFrames is None or 0
+    """
+    nr_frames: int | None = getattr(ds, "NumberOfFrames", 1)
+    # 'NumberOfFrames' may exist in the DICOM file but have value equal to None
+    if not nr_frames:  # None or 0
+        if warn:
+            warn_and_log(
+                f"A value of {nr_frames} for (0028,0008) 'Number of Frames' is "
+                "non-conformant. It's recommended that this value be "
+                "changed to 1"
+            )
+        nr_frames = 1
+
+    return nr_frames
+
+
 def iter_pixels(
     src: str | PathLike[str] | BinaryIO,
     *,
-    ds_out: Dataset | None = None,
+    ds_out: "Dataset | None" = None,
     specific_tags: list[BaseTag | int] | None = None,
     indices: Iterable[int] | None = None,
     raw: bool = False,
@@ -417,6 +668,7 @@
     native transfer syntaxes with ``view_only=True`` a read-only
     :class:`~numpy.ndarray` will be yielded.
     """
+    from pydicom.dataset import Dataset
    from pydicom.pixels import get_decoder
 
     f: BinaryIO
@@ -458,6 +710,72 @@
     f.seek(file_offset)
 
 
+def pack_bits(arr: "np.ndarray", pad: bool = True) -> bytes:
+    """Pack a binary :class:`numpy.ndarray` for use with *Pixel Data*.
+
+    Should be used in conjunction with (0028,0100) *Bits Allocated* = 1.
+
+    .. versionchanged:: 2.1
+
+        Added the `pad` keyword parameter and changed to allow `arr` to be
+        2 or 3D.
+
+    Parameters
+    ----------
+    arr : numpy.ndarray
+        The :class:`numpy.ndarray` containing 1-bit data as ints. `arr` must
+        only contain integer values of 0 and 1 and must have an 'uint' or
+        'int' :class:`numpy.dtype`. For the sake of efficiency it's recommended
+        that the length of `arr` be a multiple of 8 (i.e. that any empty
+        bit-padding to round out the byte has already been added). The input
+        `arr` should either be shaped as (rows, columns) or (frames, rows,
+        columns) or the equivalent 1D array used to ensure that the packed
+        data is in the correct order.
+    pad : bool, optional
+        If ``True`` (default) then add a null byte to the end of the packed
+        data to ensure even length, otherwise no padding will be added.
+
+    Returns
+    -------
+    bytes
+        The bit packed data.
+
+    Raises
+    ------
+    ValueError
+        If `arr` contains anything other than 0 or 1.
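+
+    A short illustration (the eight-element input is an arbitrary
+    assumption); bits are packed least significant bit first::
+
+        >>> import numpy as np
+        >>> from pydicom.pixels import pack_bits
+        >>> arr = np.array([1, 0, 0, 0, 0, 0, 1, 0], dtype="uint8")
+        >>> pack_bits(arr, pad=False)  # 0b01000001 -> 0x41 -> b"A"
+        b'A'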
+
+    References
+    ----------
+    DICOM Standard, Part 5,
+    :dcm:`Section 8.1.1` and
+    :dcm:`Annex D`
+    """
+    if arr.shape == (0,):
+        return b""
+
+    # Test array
+    if not np.array_equal(arr, arr.astype(bool)):
+        raise ValueError(
+            "Only binary arrays (containing ones or zeroes) can be packed."
+        )
+
+    if len(arr.shape) > 1:
+        arr = arr.ravel()
+
+    # The array length must be a multiple of 8, pad the end
+    if arr.shape[0] % 8:
+        arr = np.append(arr, np.zeros(8 - arr.shape[0] % 8))
+
+    arr = np.packbits(arr.astype("u1"), bitorder="little")
+
+    packed: bytes = arr.tobytes()
+    if pad:
+        return packed + b"\x00" if len(packed) % 2 else packed
+
+    return packed
+
+
 def _passes_version_check(package_name: str, minimum_version: tuple[int, ...]) -> bool:
     """Return True if `package_name` is available and its version is greater or
     equal to `minimum_version`
@@ -474,7 +792,7 @@
 def pixel_array(
     src: str | PathLike[str] | BinaryIO,
     *,
-    ds_out: Dataset | None = None,
+    ds_out: "Dataset | None" = None,
     specific_tags: list[int] | None = None,
     index: int | None = None,
     raw: bool = False,
@@ -573,6 +891,7 @@
     native transfer syntaxes with ``view_only=True`` a read-only
     :class:`~numpy.ndarray` will be returned.
     """
+    from pydicom.dataset import Dataset
     from pydicom.pixels import get_decoder
 
     f: BinaryIO
@@ -614,3 +933,308 @@
     ds_out._dict.update(ds._dict)
 
     return arr
+
+
+def pixel_dtype(ds: "Dataset", as_float: bool = False) -> "np.dtype":
+    """Return a :class:`numpy.dtype` for the pixel data in `ds`.
+
+    Suitable for use with IODs containing the Image Pixel module (with
+    ``as_float=False``) and the Floating Point Image Pixel and Double Floating
+    Point Image Pixel modules (with ``as_float=True``).
+
+    +------------------------------------------+------------------+
+    | Element                                  | Supported        |
+    +-------------+---------------------+------+ values           |
+    | Tag         | Keyword             | Type |                  |
+    +=============+=====================+======+==================+
+    | (0028,0100) | BitsAllocated       | 1    | 1, 8, 16, 32, 64 |
+    +-------------+---------------------+------+------------------+
+    | (0028,0103) | PixelRepresentation | 1    | 0, 1             |
+    +-------------+---------------------+------+------------------+
+
+    Parameters
+    ----------
+    ds : Dataset
+        The :class:`~pydicom.dataset.Dataset` containing the pixel data you
+        wish to get the data type for.
+    as_float : bool, optional
+        If ``True`` then return a float dtype, otherwise return an integer
+        dtype (default ``False``). Float dtypes are only supported when
+        (0028,0100) *Bits Allocated* is 32 or 64.
+
+    Returns
+    -------
+    numpy.dtype
+        A :class:`numpy.dtype` suitable for containing the pixel data.
+
+    Raises
+    ------
+    NotImplementedError
+        If the pixel data is of a type that isn't supported by either numpy
+        or *pydicom*.
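+
+    A minimal sketch (assumes a little endian system and that the assumed
+    ``examples.ct`` dataset contains signed 16-bit pixel data)::
+
+        >>> from pydicom import examples
+        >>> from pydicom.pixels.utils import pixel_dtype
+        >>> pixel_dtype(examples.ct)
+        dtype('int16')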
+ """ + if not HAVE_NP: + raise ImportError("Numpy is required to determine the dtype.") + + # Prefer Transfer Syntax UID, fall back to the original encoding + if hasattr(ds, "file_meta"): + is_little_endian = ds.file_meta._tsyntax_encoding[1] + else: + is_little_endian = ds.original_encoding[1] + + if is_little_endian is None: + raise AttributeError( + "Unable to determine the endianness of the dataset, please set " + "an appropriate Transfer Syntax UID in " + f"'{type(ds).__name__}.file_meta'" + ) + + if not as_float: + # (0028,0103) Pixel Representation, US, 1 + # Data representation of the pixel samples + # 0x0000 - unsigned int + # 0x0001 - 2's complement (signed int) + pixel_repr = cast(int, ds.PixelRepresentation) + if pixel_repr == 0: + dtype_str = "uint" + elif pixel_repr == 1: + dtype_str = "int" + else: + raise ValueError( + "Unable to determine the data type to use to contain the " + f"Pixel Data as a value of '{pixel_repr}' for '(0028,0103) " + "Pixel Representation' is invalid" + ) + else: + dtype_str = "float" + + # (0028,0100) Bits Allocated, US, 1 + # The number of bits allocated for each pixel sample + # PS3.5 8.1.1: Bits Allocated shall either be 1 or a multiple of 8 + # For bit packed data we use uint8 + bits_allocated = cast(int, ds.BitsAllocated) + if bits_allocated == 1: + dtype_str = "uint8" + elif bits_allocated > 0 and bits_allocated % 8 == 0: + dtype_str += str(bits_allocated) + else: + raise ValueError( + "Unable to determine the data type to use to contain the " + f"Pixel Data as a value of '{bits_allocated}' for '(0028,0100) " + "Bits Allocated' is invalid" + ) + + # Check to see if the dtype is valid for numpy + try: + dtype = np.dtype(dtype_str) + except TypeError: + raise NotImplementedError( + f"The data type '{dtype_str}' needed to contain the Pixel Data " + "is not supported by numpy" + ) + + # Correct for endianness of the system vs endianness of the dataset + if is_little_endian != (byteorder == "little"): + # 'S' swap from current to opposite + dtype = dtype.newbyteorder("S") + + return dtype + + +def reshape_pixel_array(ds: "Dataset", arr: "np.ndarray") -> "np.ndarray": + """Return a reshaped :class:`numpy.ndarray` `arr`. + + +------------------------------------------+-----------+----------+ + | Element | Supported | | + +-------------+---------------------+------+ values | | + | Tag | Keyword | Type | | | + +=============+=====================+======+===========+==========+ + | (0028,0002) | SamplesPerPixel | 1 | N > 0 | Required | + +-------------+---------------------+------+-----------+----------+ + | (0028,0006) | PlanarConfiguration | 1C | 0, 1 | Optional | + +-------------+---------------------+------+-----------+----------+ + | (0028,0008) | NumberOfFrames | 1C | N > 0 | Optional | + +-------------+---------------------+------+-----------+----------+ + | (0028,0010) | Rows | 1 | N > 0 | Required | + +-------------+---------------------+------+-----------+----------+ + | (0028,0011) | Columns | 1 | N > 0 | Required | + +-------------+---------------------+------+-----------+----------+ + + (0028,0008) *Number of Frames* is required when *Pixel Data* contains + more than 1 frame. (0028,0006) *Planar Configuration* is required when + (0028,0002) *Samples per Pixel* is greater than 1. For certain + compressed transfer syntaxes it is always taken to be either 0 or 1 as + shown in the table below. 
+ + +---------------------------------------------+-----------------------+ + | Transfer Syntax | Planar Configuration | + +------------------------+--------------------+ | + | UID | Name | | + +========================+====================+=======================+ + | 1.2.840.10008.1.2.4.50 | JPEG Baseline | 0 | + +------------------------+--------------------+-----------------------+ + | 1.2.840.10008.1.2.4.57 | JPEG Lossless, | 0 | + | | Non-hierarchical | | + +------------------------+--------------------+-----------------------+ + | 1.2.840.10008.1.2.4.70 | JPEG Lossless, | 0 | + | | Non-hierarchical, | | + | | SV1 | | + +------------------------+--------------------+-----------------------+ + | 1.2.840.10008.1.2.4.80 | JPEG-LS Lossless | 0 | + +------------------------+--------------------+-----------------------+ + | 1.2.840.10008.1.2.4.81 | JPEG-LS Lossy | 0 | + +------------------------+--------------------+-----------------------+ + | 1.2.840.10008.1.2.4.90 | JPEG 2000 Lossless | 0 | + +------------------------+--------------------+-----------------------+ + | 1.2.840.10008.1.2.4.91 | JPEG 2000 Lossy | 0 | + +------------------------+--------------------+-----------------------+ + | 1.2.840.10008.1.2.5 | RLE Lossless | 1 | + +------------------------+--------------------+-----------------------+ + + .. versionchanged:: 2.1 + + JPEG-LS transfer syntaxes changed to *Planar Configuration* of 0 + + Parameters + ---------- + ds : dataset.Dataset + The :class:`~pydicom.dataset.Dataset` containing the Image Pixel module + corresponding to the data in `arr`. + arr : numpy.ndarray + The 1D array containing the pixel data. + + Returns + ------- + numpy.ndarray + A reshaped array containing the pixel data. The shape of the array + depends on the contents of the dataset: + + * For single frame, single sample data (rows, columns) + * For single frame, multi-sample data (rows, columns, planes) + * For multi-frame, single sample data (frames, rows, columns) + * For multi-frame, multi-sample data (frames, rows, columns, planes) + + References + ---------- + + * DICOM Standard, Part 3, + :dcm:`Annex C.7.6.3.1` + * DICOM Standard, Part 5, :dcm:`Section 8.2` + """ + if not HAVE_NP: + raise ImportError("Numpy is required to reshape the pixel array.") + + nr_frames = get_nr_frames(ds) + nr_samples = cast(int, ds.SamplesPerPixel) + + if nr_samples < 1: + raise ValueError( + f"Unable to reshape the pixel array as a value of {nr_samples} " + "for (0028,0002) 'Samples per Pixel' is invalid." + ) + + # Valid values for Planar Configuration are dependent on transfer syntax + if nr_samples > 1: + transfer_syntax = ds.file_meta.TransferSyntaxUID + if transfer_syntax in [ + "1.2.840.10008.1.2.4.50", + "1.2.840.10008.1.2.4.57", + "1.2.840.10008.1.2.4.70", + "1.2.840.10008.1.2.4.80", + "1.2.840.10008.1.2.4.81", + "1.2.840.10008.1.2.4.90", + "1.2.840.10008.1.2.4.91", + ]: + planar_configuration = 0 + elif transfer_syntax in ["1.2.840.10008.1.2.5"]: + planar_configuration = 1 + else: + planar_configuration = ds.PlanarConfiguration + + if planar_configuration not in [0, 1]: + raise ValueError( + "Unable to reshape the pixel array as a value of " + f"{planar_configuration} for (0028,0006) 'Planar " + "Configuration' is invalid." 
+            )
+
+    rows = cast(int, ds.Rows)
+    columns = cast(int, ds.Columns)
+    if nr_frames > 1:
+        # Multi-frame
+        if nr_samples == 1:
+            # Single plane
+            arr = arr.reshape(nr_frames, rows, columns)
+        else:
+            # Multiple planes, usually 3
+            if planar_configuration == 0:
+                arr = arr.reshape(nr_frames, rows, columns, nr_samples)
+            else:
+                arr = arr.reshape(nr_frames, nr_samples, rows, columns)
+                arr = arr.transpose(0, 2, 3, 1)
+    else:
+        # Single frame
+        if nr_samples == 1:
+            # Single plane
+            arr = arr.reshape(rows, columns)
+        else:
+            # Multiple planes, usually 3
+            if planar_configuration == 0:
+                arr = arr.reshape(rows, columns, nr_samples)
+            else:
+                arr = arr.reshape(nr_samples, rows, columns)
+                arr = arr.transpose(1, 2, 0)
+
+    return arr
+
+
+def unpack_bits(src: bytes, as_array: bool = True) -> "np.ndarray | bytes":
+    """Unpack the bit-packed data in `src`.
+
+    Suitable for use when (0028,0100) *Bits Allocated* or (60xx,0100) *Overlay
+    Bits Allocated* is 1.
+
+    If `NumPy `_ is available then it will be used to
+    unpack the data, otherwise only the standard library will be used, which
+    is about 20 times slower.
+
+    .. versionchanged:: 2.3
+
+        Added the `as_array` keyword parameter, support for unpacking
+        without NumPy, and added :class:`bytes` as a possible return type
+
+    Parameters
+    ----------
+    src : bytes
+        The bit-packed data.
+    as_array : bool, optional
+        If ``False`` then return the unpacked data as :class:`bytes`, otherwise
+        return a :class:`numpy.ndarray` (default, requires NumPy).
+
+    Returns
+    -------
+    bytes or numpy.ndarray
+        The unpacked data as an :class:`numpy.ndarray` (if NumPy is available
+        and ``as_array == True``) or :class:`bytes` otherwise.
+
+    Raises
+    ------
+    ValueError
+        If `as_array` is ``True`` and NumPy is not available.
+
+    References
+    ----------
+    DICOM Standard, Part 5,
+    :dcm:`Section 8.1.1` and
+    :dcm:`Annex D`
+    """
+    if HAVE_NP:
+        arr = np.frombuffer(src, dtype="u1")
+        arr = np.unpackbits(arr, bitorder="little")
+
+        return arr if as_array else arr.tobytes()
+
+    if as_array:
+        raise ValueError("unpack_bits() requires NumPy if 'as_array = True'")
+
+    return b"".join(map(_UNPACK_LUT.__getitem__, src))
diff --git a/tests/pixels/test_decoder_base.py b/tests/pixels/test_decoder_base.py
index 5250fe1f59..d5add26f43 100644
--- a/tests/pixels/test_decoder_base.py
+++ b/tests/pixels/test_decoder_base.py
@@ -10,10 +10,11 @@
 from pydicom import config
 from pydicom.dataset import Dataset
 from pydicom.encaps import get_frame, generate_frames, encapsulate
-from pydicom.pixels import get_decoder, ExplicitVRLittleEndianDecoder
+from pydicom.pixels import get_decoder
 from pydicom.pixels.common import PhotometricInterpretation as PI
+from pydicom.pixels.decoders import ExplicitVRLittleEndianDecoder
 from pydicom.pixels.decoders.base import DecodeRunner, Decoder
-from pydicom.pixel_data_handlers.util import convert_color_space
+from pydicom.pixels.processing import convert_color_space
 from pydicom.uid import (
     ExplicitVRLittleEndian,
diff --git a/tests/pixels/test_decoder_native.py b/tests/pixels/test_decoder_native.py
index b4c2c548d8..eaa5b7e619 100644
--- a/tests/pixels/test_decoder_native.py
+++ b/tests/pixels/test_decoder_native.py
@@ -4,8 +4,8 @@
 from pydicom import dcmread
 from pydicom.data import get_testdata_file
-from pydicom.pixels import (
-    get_decoder,
+from pydicom.pixels import get_decoder
+from pydicom.pixels.decoders import (
     ImplicitVRLittleEndianDecoder,
     ExplicitVRLittleEndianDecoder,
     ExplicitVRBigEndianDecoder,
diff --git
a/tests/pixels/test_decoder_pylibjpeg.py b/tests/pixels/test_decoder_pylibjpeg.py index 539a398bdd..5f0e15a152 100644 --- a/tests/pixels/test_decoder_pylibjpeg.py +++ b/tests/pixels/test_decoder_pylibjpeg.py @@ -28,7 +28,7 @@ HTJ2K, RLELossless, ) -from pydicom.pixel_data_handlers.util import get_j2k_parameters +from pydicom.pixels.utils import get_j2k_parameters from .pixels_reference import ( PIXEL_REFERENCE, diff --git a/tests/pixels/test_decoder_rle.py b/tests/pixels/test_decoder_rle.py index 87e94ae0c9..13f0408474 100644 --- a/tests/pixels/test_decoder_rle.py +++ b/tests/pixels/test_decoder_rle.py @@ -7,7 +7,8 @@ from pydicom.config import debug from pydicom.encaps import get_frame, generate_frames, encapsulate -from pydicom.pixels import get_decoder, RLELosslessDecoder +from pydicom.pixels import get_decoder +from pydicom.pixels.decoders import RLELosslessDecoder from pydicom.pixels.decoders.rle import ( _rle_parse_header, _rle_decode_segment, diff --git a/tests/pixels/test_encoder_base.py b/tests/pixels/test_encoder_base.py index 2ad67f38d9..fbbaea81b4 100644 --- a/tests/pixels/test_encoder_base.py +++ b/tests/pixels/test_encoder_base.py @@ -15,10 +15,10 @@ from pydicom import config, examples from pydicom.data import get_testdata_file from pydicom.dataset import Dataset -from pydicom.pixels import RLELosslessEncoder +from pydicom.pixels.encoders import RLELosslessEncoder from pydicom.pixels.common import PhotometricInterpretation as PI from pydicom.pixels.encoders.base import Encoder, EncodeRunner -from pydicom.pixel_data_handlers.util import get_expected_length +from pydicom.pixels.utils import get_expected_length from pydicom.uid import ( UID, RLELossless, diff --git a/tests/pixels/test_encoder_gdcm.py b/tests/pixels/test_encoder_gdcm.py index 7f21cbde12..dda13fc890 100644 --- a/tests/pixels/test_encoder_gdcm.py +++ b/tests/pixels/test_encoder_gdcm.py @@ -4,11 +4,11 @@ from pydicom import dcmread from pydicom.data import get_testdata_file -from pydicom.pixels import RLELosslessEncoder +from pydicom.pixels.encoders import RLELosslessEncoder from pydicom.pixels.encoders.base import EncodeRunner from pydicom.pixels.encoders.gdcm import _rle_encode as gdcm_rle_encode -from pydicom.pixel_data_handlers.rle_handler import _rle_decode_frame -from pydicom.pixel_data_handlers.util import reshape_pixel_array +from pydicom.pixels.decoders.rle import _rle_decode_frame +from pydicom.pixels.utils import reshape_pixel_array from pydicom.uid import RLELossless, JPEG2000, ExplicitVRLittleEndian diff --git a/tests/pixels/test_encoder_pydicom.py b/tests/pixels/test_encoder_pydicom.py index cc7bb1d301..488ed91d0f 100644 --- a/tests/pixels/test_encoder_pydicom.py +++ b/tests/pixels/test_encoder_pydicom.py @@ -15,14 +15,11 @@ from pydicom.data import get_testdata_file from pydicom.dataset import FileMetaDataset from pydicom.encaps import get_frame -from pydicom.pixels import RLELosslessEncoder +from pydicom.pixels.encoders import RLELosslessEncoder from pydicom.pixels.encoders.base import EncodeRunner from pydicom.pixels.encoders.native import _encode_frame, _encode_segment, _encode_row -from pydicom.pixel_data_handlers.rle_handler import ( - _rle_decode_frame, - _rle_decode_segment, -) -from pydicom.pixel_data_handlers.util import reshape_pixel_array +from pydicom.pixels.decoders.rle import _rle_decode_frame, _rle_decode_segment +from pydicom.pixels.utils import reshape_pixel_array from pydicom.uid import RLELossless diff --git a/tests/pixels/test_encoder_pyjpegls.py 
b/tests/pixels/test_encoder_pyjpegls.py index fa13a290b3..80b8381923 100644 --- a/tests/pixels/test_encoder_pyjpegls.py +++ b/tests/pixels/test_encoder_pyjpegls.py @@ -13,9 +13,11 @@ from pydicom import Dataset, examples from pydicom.encaps import encapsulate, get_frame -from pydicom.pixels import ( +from pydicom.pixels.encoders import ( JPEGLSLosslessEncoder, JPEGLSNearLosslessEncoder, +) +from pydicom.pixels.decoders import ( JPEGLSLosslessDecoder, JPEGLSNearLosslessDecoder, ) diff --git a/tests/pixels/test_encoder_pylibjpeg.py b/tests/pixels/test_encoder_pylibjpeg.py index 31499b5383..ab89a7ff7a 100644 --- a/tests/pixels/test_encoder_pylibjpeg.py +++ b/tests/pixels/test_encoder_pylibjpeg.py @@ -15,15 +15,17 @@ from pydicom import dcmread, examples from pydicom.data import get_testdata_file from pydicom.encaps import encapsulate, get_frame -from pydicom.pixels import ( +from pydicom.pixels.encoders import ( JPEG2000LosslessEncoder, JPEG2000Encoder, +) +from pydicom.pixels.decoders import ( JPEG2000LosslessDecoder, JPEG2000Decoder, ) from pydicom.pixels.encoders.pylibjpeg import is_available from pydicom.pixels.utils import as_pixel_options -from pydicom.pixel_data_handlers.util import get_expected_length, get_j2k_parameters +from pydicom.pixels.utils import get_expected_length, get_j2k_parameters from pydicom.uid import RLELossless, JPEG2000 diff --git a/tests/pixels/test_processing.py b/tests/pixels/test_processing.py new file mode 100644 index 0000000000..eecc4d5f00 --- /dev/null +++ b/tests/pixels/test_processing.py @@ -0,0 +1,1677 @@ +# Copyright 2008-2024 pydicom authors. See LICENSE file for details. +"""Tests for the pixels.processing module.""" + +import os +from struct import unpack, pack + +import pytest + +try: + import numpy as np + + HAVE_NP = True +except ImportError: + HAVE_NP = False + +from pydicom import dcmread, config +from pydicom.data import get_testdata_file, get_palette_files +from pydicom.dataset import Dataset, FileMetaDataset +from pydicom.pixels.processing import ( + convert_color_space, + apply_color_lut, + _expand_segmented_lut, + apply_modality_lut, + apply_voi_lut, + apply_voi, + apply_windowing, +) +from pydicom.uid import ExplicitVRLittleEndian, ImplicitVRLittleEndian + + +# PAL: PALETTE COLOR Photometric Interpretation +# SEG: Segmented Palette Color +# SUP: Supplemental Palette Color +# LE, BE: little endian, big endian encoding +# 8/8, 1 sample/pixel, 1 frame +PAL_08_256_0_16_1F = get_testdata_file("OBXXXX1A.dcm") +PAL_08_200_0_16_1F = get_testdata_file("OT-PAL-8-face.dcm") +# 8/8, 1 sample/pixel, 2 frame +PAL_08_256_0_16_2F = get_testdata_file("OBXXXX1A_2frame.dcm") +# PALETTE COLOR with 16-bit LUTs (no indirect segments) +PAL_SEG_LE_16_1F = get_testdata_file("gdcm-US-ALOKA-16.dcm") +PAL_SEG_BE_16_1F = get_testdata_file("gdcm-US-ALOKA-16_big.dcm") +# Supplemental palette colour + VOI windowing +SUP_16_16_2F = get_testdata_file("eCT_Supplemental.dcm") +# 8 bit, 3 samples/pixel, 1 and 2 frame datasets +# RGB colorspace, uncompressed +RGB_8_3_1F = get_testdata_file("SC_rgb.dcm") +RGB_8_3_2F = get_testdata_file("SC_rgb_2frame.dcm") +# MOD: Modality LUT +# SEQ: Modality LUT Sequence +MOD_16 = get_testdata_file("CT_small.dcm") +MOD_16_SEQ = get_testdata_file("mlut_18.dcm") +# VOI: VOI LUT Sequence +# WIN: Windowing operation +WIN_12_1F = get_testdata_file("MR-SIEMENS-DICOM-WithOverlays.dcm") +VOI_08_1F = get_testdata_file("vlut_04.dcm") + + +@pytest.mark.skipif(not HAVE_NP, reason="Numpy is not available") +class TestConvertColourSpace: + """Tests for 
convert_color_space()."""
+
+    def test_unknown_current_raises(self):
+        """Test an unknown current color space raises exception."""
+        with pytest.raises(
+            NotImplementedError, match="Conversion from TEST to RGB is not suppo"
+        ):
+            convert_color_space(None, "TEST", "RGB")
+
+    def test_unknown_desired_raises(self):
+        """Test an unknown desired color space raises exception."""
+        with pytest.raises(
+            NotImplementedError, match="Conversion from RGB to TEST is not suppo"
+        ):
+            convert_color_space(None, "RGB", "TEST")
+
+    @pytest.mark.parametrize(
+        "current, desired",
+        [
+            ("RGB", "RGB"),
+            ("YBR_FULL", "YBR_FULL"),
+            ("YBR_FULL", "YBR_FULL_422"),
+            ("YBR_FULL_422", "YBR_FULL_422"),
+            ("YBR_FULL_422", "YBR_FULL"),
+        ],
+    )
+    def test_current_is_desired(self, current, desired):
+        """Test that the array is unchanged when current matches desired."""
+        arr = np.ones((2, 3))
+        assert np.array_equal(arr, convert_color_space(arr, current, desired))
+
+    def test_rgb_ybr_rgb_single_frame(self):
+        """Test round trip conversion of single framed pixel data."""
+        ds = dcmread(RGB_8_3_1F)
+
+        arr = ds.pixel_array
+        assert (255, 0, 0) == tuple(arr[5, 50, :])
+        assert (255, 128, 128) == tuple(arr[15, 50, :])
+        assert (0, 255, 0) == tuple(arr[25, 50, :])
+        assert (128, 255, 128) == tuple(arr[35, 50, :])
+        assert (0, 0, 255) == tuple(arr[45, 50, :])
+        assert (128, 128, 255) == tuple(arr[55, 50, :])
+        assert (0, 0, 0) == tuple(arr[65, 50, :])
+        assert (64, 64, 64) == tuple(arr[75, 50, :])
+        assert (192, 192, 192) == tuple(arr[85, 50, :])
+        assert (255, 255, 255) == tuple(arr[95, 50, :])
+
+        ybr = convert_color_space(arr, "RGB", "YBR_FULL")
+        assert (76, 85, 255) == tuple(ybr[5, 50, :])
+        assert (166, 107, 192) == tuple(ybr[15, 50, :])
+        assert (150, 44, 21) == tuple(ybr[25, 50, :])
+        assert (203, 86, 75) == tuple(ybr[35, 50, :])
+        assert (29, 255, 107) == tuple(ybr[45, 50, :])
+        assert (142, 192, 118) == tuple(ybr[55, 50, :])
+        assert (0, 128, 128) == tuple(ybr[65, 50, :])
+        assert (64, 128, 128) == tuple(ybr[75, 50, :])
+        assert (192, 128, 128) == tuple(ybr[85, 50, :])
+        assert (255, 128, 128) == tuple(ybr[95, 50, :])
+
+        # Round trip -> rounding errors get compounded
+        rgb = convert_color_space(ybr, "YBR_FULL", "RGB")
+        # All pixels within +/- 1 units
+        assert np.allclose(rgb, arr, atol=1)
+        assert rgb.shape == arr.shape
+
+    def test_rgb_ybr_rgb_multi_frame(self):
+        """Test round trip conversion of multi-framed pixel data."""
+        ds = dcmread(RGB_8_3_2F)
+
+        arr = ds.pixel_array
+        assert (255, 0, 0) == tuple(arr[0, 5, 50, :])
+        assert (255, 128, 128) == tuple(arr[0, 15, 50, :])
+        assert (0, 255, 0) == tuple(arr[0, 25, 50, :])
+        assert (128, 255, 128) == tuple(arr[0, 35, 50, :])
+        assert (0, 0, 255) == tuple(arr[0, 45, 50, :])
+        assert (128, 128, 255) == tuple(arr[0, 55, 50, :])
+        assert (0, 0, 0) == tuple(arr[0, 65, 50, :])
+        assert (64, 64, 64) == tuple(arr[0, 75, 50, :])
+        assert (192, 192, 192) == tuple(arr[0, 85, 50, :])
+        assert (255, 255, 255) == tuple(arr[0, 95, 50, :])
+        # Frame 2 is frame 1 inverted
+        assert np.array_equal((2**ds.BitsAllocated - 1) - arr[1], arr[0])
+
+        ybr = convert_color_space(arr, "RGB", "YBR_FULL")
+        assert (76, 85, 255) == tuple(ybr[0, 5, 50, :])
+        assert (166, 107, 192) == tuple(ybr[0, 15, 50, :])
+        assert (150, 44, 21) == tuple(ybr[0, 25, 50, :])
+        assert (203, 86, 75) == tuple(ybr[0, 35, 50, :])
+        assert (29, 255, 107) == tuple(ybr[0, 45, 50, :])
+        assert (142, 192, 118) == tuple(ybr[0, 55, 50, :])
+        assert (0, 128, 128) == tuple(ybr[0, 65, 50, :])
+        assert (64, 128, 128) ==
tuple(ybr[0, 75, 50, :]) + assert (192, 128, 128) == tuple(ybr[0, 85, 50, :]) + assert (255, 128, 128) == tuple(ybr[0, 95, 50, :]) + # Frame 2 + assert (179, 171, 1) == tuple(ybr[1, 5, 50, :]) + assert (89, 149, 65) == tuple(ybr[1, 15, 50, :]) + assert (105, 212, 235) == tuple(ybr[1, 25, 50, :]) + assert (52, 170, 181) == tuple(ybr[1, 35, 50, :]) + assert (226, 1, 149) == tuple(ybr[1, 45, 50, :]) + assert (113, 65, 138) == tuple(ybr[1, 55, 50, :]) + assert (255, 128, 128) == tuple(ybr[1, 65, 50, :]) + assert (191, 128, 128) == tuple(ybr[1, 75, 50, :]) + assert (63, 128, 128) == tuple(ybr[1, 85, 50, :]) + assert (0, 128, 128) == tuple(ybr[1, 95, 50, :]) + + # Round trip -> rounding errors get compounded + rgb = convert_color_space(ybr, "YBR_FULL", "RGB") + # All pixels within +/- 1 units + assert np.allclose(rgb, arr, atol=1) + assert rgb.shape == arr.shape + + def test_frame_by_frame(self): + """Test processing frame-by-frame.""" + ds = dcmread(RGB_8_3_2F) + + arr = ds.pixel_array + ybr = convert_color_space(arr, "RGB", "YBR_FULL", per_frame=True) + assert (76, 85, 255) == tuple(ybr[0, 5, 50, :]) + assert (166, 107, 192) == tuple(ybr[0, 15, 50, :]) + assert (150, 44, 21) == tuple(ybr[0, 25, 50, :]) + assert (203, 86, 75) == tuple(ybr[0, 35, 50, :]) + assert (29, 255, 107) == tuple(ybr[0, 45, 50, :]) + assert (142, 192, 118) == tuple(ybr[0, 55, 50, :]) + assert (0, 128, 128) == tuple(ybr[0, 65, 50, :]) + assert (64, 128, 128) == tuple(ybr[0, 75, 50, :]) + assert (192, 128, 128) == tuple(ybr[0, 85, 50, :]) + assert (255, 128, 128) == tuple(ybr[0, 95, 50, :]) + # Frame 2 + assert (179, 171, 1) == tuple(ybr[1, 5, 50, :]) + assert (89, 149, 65) == tuple(ybr[1, 15, 50, :]) + assert (105, 212, 235) == tuple(ybr[1, 25, 50, :]) + assert (52, 170, 181) == tuple(ybr[1, 35, 50, :]) + assert (226, 1, 149) == tuple(ybr[1, 45, 50, :]) + assert (113, 65, 138) == tuple(ybr[1, 55, 50, :]) + assert (255, 128, 128) == tuple(ybr[1, 65, 50, :]) + assert (191, 128, 128) == tuple(ybr[1, 75, 50, :]) + assert (63, 128, 128) == tuple(ybr[1, 85, 50, :]) + assert (0, 128, 128) == tuple(ybr[1, 95, 50, :]) + + +@pytest.mark.skipif(not HAVE_NP, reason="Numpy is not available") +class TestModalityLUT: + """Tests for apply_modality_lut().""" + + def test_slope_intercept(self): + """Test the rescale slope/intercept transform.""" + ds = dcmread(MOD_16) + assert 1 == ds.RescaleSlope + assert -1024 == ds.RescaleIntercept + arr = ds.pixel_array + out = apply_modality_lut(arr, ds) + assert out.flags.writeable + assert np.float64 == out.dtype + + assert np.array_equal(arr - 1024, out) + + ds.RescaleSlope = 2.5 + ds.RescaleIntercept = -2048 + out = apply_modality_lut(arr, ds) + assert np.array_equal(arr * 2.5 - 2048, out) + + def test_lut_sequence(self): + """Test the LUT Sequence transform.""" + ds = dcmread(MOD_16_SEQ) + seq = ds.ModalityLUTSequence[0] + assert [4096, -2048, 16] == seq.LUTDescriptor + arr = ds.pixel_array + assert -2048 == arr.min() + assert 4095 == arr.max() + out = apply_modality_lut(arr, ds) + + # IV > 2047 -> LUT[4095] + mapped_pixels = arr > 2047 + assert seq.LUTData[-1] == out[mapped_pixels][0] + assert (seq.LUTData[-1] == out[mapped_pixels]).all() + assert out.flags.writeable + assert out.dtype == np.uint16 + + assert [65535, 65535, 49147, 49147, 65535] == list(out[0, 50:55]) + assert [65535, 65535, 65535, 65535, 65535] == list(out[50, 50:55]) + assert [65535, 65535, 65535, 65535, 65535] == list(out[100, 50:55]) + assert [65535, 65535, 49147, 49147, 65535] == list(out[150, 50:55]) + assert [65535, 65535, 
49147, 49147, 65535] == list(out[200, 50:55]) + assert 39321 == out[185, 340] + assert 45867 == out[185, 385] + assert 52428 == out[228, 385] + assert 58974 == out[291, 385] + + def test_lut_sequence_zero_entries(self): + """Test that 0 entries is interpreted correctly.""" + # LUTDescriptor[0] of 0 -> 65536, but only 4096 entries so any + # attempt to access LUTData[4096] or higher will raise IndexError + ds = dcmread(MOD_16_SEQ) + seq = ds.ModalityLUTSequence[0] + seq.LUTDescriptor = [0, 0, 16] + assert 4096 == len(seq.LUTData) + arr = np.asarray([0, 4095, 4096, 65535]) + msg = r"index 4096 is out of bounds" + with pytest.raises(IndexError, match=msg): + apply_modality_lut(arr, ds) + + # LUTData with 65536 entries + seq.LUTData = [0] * 65535 + [1] + out = apply_modality_lut(arr, ds) + assert [0, 0, 0, 1] == list(out) + + def test_unchanged(self): + """Test no modality LUT transform.""" + ds = dcmread(MOD_16) + del ds.RescaleSlope + del ds.RescaleIntercept + arr = ds.pixel_array + out = apply_modality_lut(arr, ds) + assert arr is out + + ds.ModalityLUTSequence = [] + out = apply_modality_lut(arr, ds) + assert arr is out + + def test_lutdata_ow(self): + """Test LUT Data with VR OW.""" + ds = dcmread(MOD_16_SEQ) + assert ds.original_encoding == (False, True) + seq = ds.ModalityLUTSequence[0] + assert [4096, -2048, 16] == seq.LUTDescriptor + seq["LUTData"].VR = "OW" + seq.LUTData = pack("<4096H", *seq.LUTData) + arr = ds.pixel_array + assert -2048 == arr.min() + assert 4095 == arr.max() + out = apply_modality_lut(arr, ds) + + # IV > 2047 -> LUT[4095] + mapped_pixels = arr > 2047 + assert 65535 == out[mapped_pixels][0] + assert (65535 == out[mapped_pixels]).all() + assert out.flags.writeable + assert out.dtype == np.uint16 + + assert [65535, 65535, 49147, 49147, 65535] == list(out[0, 50:55]) + assert [65535, 65535, 65535, 65535, 65535] == list(out[50, 50:55]) + assert [65535, 65535, 65535, 65535, 65535] == list(out[100, 50:55]) + assert [65535, 65535, 49147, 49147, 65535] == list(out[150, 50:55]) + assert [65535, 65535, 49147, 49147, 65535] == list(out[200, 50:55]) + assert 39321 == out[185, 340] + assert 45867 == out[185, 385] + assert 52428 == out[228, 385] + assert 58974 == out[291, 385] + + def test_no_endianness_raises(self): + ds = dcmread(MOD_16_SEQ) + assert ds.original_encoding == (False, True) + seq = ds.ModalityLUTSequence[0] + assert [4096, -2048, 16] == seq.LUTDescriptor + seq["LUTData"].VR = "OW" + seq.LUTData = pack("<4096H", *seq.LUTData) + arr = ds.pixel_array + del ds.file_meta + ds._read_little = None + msg = ( + "Unable to determine the endianness of the dataset, please set " + "an appropriate Transfer Syntax UID in 'FileDataset.file_meta'" + ) + with pytest.raises(AttributeError, match=msg): + apply_modality_lut(arr, ds) + + def test_file_meta(self): + """Test using file meta to determine endianness""" + ds = dcmread(MOD_16_SEQ) + seq = ds.ModalityLUTSequence[0] + assert [4096, -2048, 16] == seq.LUTDescriptor + seq["LUTData"].VR = "OW" + seq.LUTData = pack("<4096H", *seq.LUTData) + arr = ds.pixel_array + ds._read_little = None + out = apply_modality_lut(arr, ds) + + assert 39321 == out[185, 340] + assert 45867 == out[185, 385] + assert 52428 == out[228, 385] + assert 58974 == out[291, 385] + + def test_original_encoding(self): + """Test using original encoding to determine endianness""" + ds = dcmread(MOD_16_SEQ) + seq = ds.ModalityLUTSequence[0] + assert [4096, -2048, 16] == seq.LUTDescriptor + seq["LUTData"].VR = "OW" + seq.LUTData = pack("<4096H", *seq.LUTData) + arr = 
ds.pixel_array
+        del ds.file_meta
+        assert ds.original_encoding == (False, True)
+        out = apply_modality_lut(arr, ds)
+
+        assert 39321 == out[185, 340]
+        assert 45867 == out[185, 385]
+        assert 52428 == out[228, 385]
+        assert 58974 == out[291, 385]
+
+
+@pytest.mark.skipif(not HAVE_NP, reason="Numpy is not available")
+class TestApplyColorLUT:
+    """Tests for apply_color_lut()."""
+
+    def setup_method(self):
+        """Setup the tests"""
+        self.o_palette = get_palette_files("pet.dcm")[0]
+        self.n_palette = get_palette_files("pet.dcm")[0][:-3] + "tmp"
+
+    def teardown_method(self):
+        """Teardown the tests"""
+        if os.path.exists(self.n_palette):
+            os.rename(self.n_palette, self.o_palette)
+
+    def test_neither_ds_nor_palette_raises(self):
+        """Test that missing `ds` and `palette` raises an exception."""
+        ds = dcmread(PAL_08_256_0_16_1F)
+        msg = r"Either 'ds' or 'palette' is required"
+        with pytest.raises(ValueError, match=msg):
+            apply_color_lut(ds.pixel_array)
+
+    def test_palette_unknown_raises(self, disable_value_validation):
+        """Test that using an unknown `palette` raises an exception."""
+        ds = dcmread(PAL_08_256_0_16_1F)
+        # Palette name
+        msg = r"Unknown palette 'TEST'"
+        with pytest.raises(ValueError, match=msg):
+            apply_color_lut(ds.pixel_array, palette="TEST")
+
+        # SOP Instance UID
+        msg = r"Unknown palette '1.2.840.10008.1.1'"
+        with pytest.raises(ValueError, match=msg):
+            apply_color_lut(ds.pixel_array, palette="1.2.840.10008.1.1")
+
+    def test_palette_unavailable_raises(self, disable_value_validation):
+        """Test that using a missing `palette` raises an exception."""
+        os.rename(self.o_palette, self.n_palette)
+        ds = dcmread(PAL_08_256_0_16_1F)
+        msg = r"list index out of range"
+        with pytest.raises(IndexError, match=msg):
+            apply_color_lut(ds.pixel_array, palette="PET")
+
+    def test_supplemental_raises(self):
+        """Test that supplemental palette color LUT raises exception."""
+        ds = dcmread(SUP_16_16_2F)
+        msg = (
+            r"Use of this function with the Supplemental Palette Color Lookup "
+            r"Table Module is not currently supported"
+        )
+        with pytest.raises(ValueError, match=msg):
+            apply_color_lut(ds.pixel_array, ds)
+
+    def test_invalid_bit_depth_raises(self):
+        """Test that an invalid bit depth raises an exception."""
+        ds = dcmread(PAL_08_256_0_16_1F)
+        ds.RedPaletteColorLookupTableDescriptor[2] = 15
+        msg = r"data type ['\"]uint15['\"] not understood"
+        with pytest.raises(TypeError, match=msg):
+            apply_color_lut(ds.pixel_array, ds)
+
+    def test_invalid_lut_bit_depth_raises(self):
+        """Test that an invalid LUT bit depth raises an exception."""
+        ds = dcmread(PAL_08_256_0_16_1F)
+        ds.RedPaletteColorLookupTableData = ds.RedPaletteColorLookupTableData[:-2]
+        ds.GreenPaletteColorLookupTableData = ds.GreenPaletteColorLookupTableData[:-2]
+        ds.BluePaletteColorLookupTableData = ds.BluePaletteColorLookupTableData[:-2]
+        msg = (
+            r"The bit depth of the LUT data '15.9' is invalid \(only 8 or 16 "
+            r"bits per entry allowed\)"
+        )
+        with pytest.raises(ValueError, match=msg):
+            apply_color_lut(ds.pixel_array, ds)
+
+    def test_unequal_lut_length_raises(self):
+        """Test that unequal LUT lengths raise an exception."""
+        ds = dcmread(PAL_08_256_0_16_1F)
+        ds.BluePaletteColorLookupTableData = ds.BluePaletteColorLookupTableData[:-2]
+        msg = r"LUT data must be the same length"
+        with pytest.raises(ValueError, match=msg):
+            apply_color_lut(ds.pixel_array, ds)
+
+    def test_no_palette_color(self):
+        """Test that a missing palette color module raises an exception."""
+        ds = dcmread(PAL_08_256_0_16_1F)
+        del
+        msg = r"No suitable Palette Color Lookup Table Module found"
+        with pytest.raises(ValueError, match=msg):
+            apply_color_lut(ds.pixel_array, ds)
+
+    def test_uint08_16(self):
+        """Test uint8 Pixel Data with 16-bit LUT entries."""
+        ds = dcmread(PAL_08_200_0_16_1F, force=True)
+        ds.file_meta = FileMetaDataset()
+        ds.file_meta.TransferSyntaxUID = ImplicitVRLittleEndian
+        assert 8 == ds.BitsStored
+        assert 16 == ds.RedPaletteColorLookupTableDescriptor[2]
+        arr = ds.pixel_array
+        orig = arr.copy()
+        rgb = apply_color_lut(arr, ds)
+        assert (480, 640, 3) == rgb.shape
+        assert [0, 0, 0] == list(rgb[0, 0, :])
+        assert [9216, 9216, 9216] == list(rgb[0, 4, :])
+        assert [18688, 18688, 18688] == list(rgb[0, 9, :])
+        assert [27904, 33536, 0] == list(rgb[0, 638, :])
+        assert [18688, 24320, 0] == list(rgb[479, 639, :])
+
+        # original `arr` is unchanged
+        assert np.array_equal(orig, arr)
+
+    def test_uint08_16_2frame(self):
+        """Test 2 frame uint8 Pixel Data with 16-bit LUT entries."""
+        ds = dcmread(PAL_08_256_0_16_2F)
+        assert 8 == ds.BitsStored
+        assert 16 == ds.RedPaletteColorLookupTableDescriptor[2]
+        arr = ds.pixel_array
+        orig = arr.copy()
+        rgb = apply_color_lut(arr, ds)
+        assert (2, 600, 800, 3) == rgb.shape
+        assert [9472, 15872, 24064] == list(rgb[0, 0, 0, :])
+        assert [34816, 43520, 54016] == list(rgb[0, 12, 12, :])
+        assert [65280, 65280, 65280] == list(rgb[0, 17, 110, :])
+        assert [0, 0, 0] == list(rgb[0, 77, 103, :])
+        assert [23040, 52480, 65280] == list(rgb[0, 478, 793, :])
+
+        # 2nd frame is inverse of 1st, so won't be coloured correctly
+        ref = np.asarray(
+            [
+                [26112, 26112, 26112],
+                [54528, 54528, 54528],
+                [54528, 54528, 54528],
+                [16640, 16640, 16640],
+                [49152, 45056, 22016],
+                [34816, 43520, 54016],
+                [5632, 9984, 14848],
+                [62464, 2816, 2816],
+                [3072, 5632, 8192],
+                [3072, 5632, 8192],
+            ]
+        )
+        assert np.array_equal(ref, rgb[1, 143:153, 355, :])
+
+        # original `arr` is unchanged
+        assert np.array_equal(orig, arr)
+
+    def test_uint16_16_segmented_little(self):
+        """Test uint16 Pixel Data with 16-bit LUT entries."""
+        # Endianness from file_meta
+        ds = dcmread(PAL_SEG_LE_16_1F)
+        assert 16 == ds.BitsStored
+        assert 16 == ds.RedPaletteColorLookupTableDescriptor[2]
+        arr = ds.pixel_array
+        orig = arr.copy()
+        rgb = apply_color_lut(arr, ds)
+        assert (480, 640, 3) == rgb.shape
+        assert [10280, 11565, 16705] == list(rgb[0, 0, :])
+        assert [10280, 11565, 16705] == list(rgb[0, 320, :])
+        assert [10280, 11565, 16705] == list(rgb[0, 639, :])
+        assert [0, 0, 0] == list(rgb[240, 0, :])
+        assert [257, 257, 257] == list(rgb[240, 320, :])
+        assert [2313, 2313, 2313] == list(rgb[240, 639, :])
+        assert [10280, 11565, 16705] == list(rgb[479, 0, :])
+        assert [10280, 11565, 16705] == list(rgb[479, 320, :])
+        assert [10280, 11565, 16705] == list(rgb[479, 639, :])
+
+        assert (orig == arr).all()
+
+        # Endianness from original encoding
+        ds = dcmread(PAL_SEG_LE_16_1F)
+        assert 16 == ds.BitsStored
+        assert 16 == ds.RedPaletteColorLookupTableDescriptor[2]
+        arr = ds.pixel_array
+        orig = arr.copy()
+        del ds.file_meta
+        rgb = apply_color_lut(arr, ds)
+        assert (480, 640, 3) == rgb.shape
+        assert [10280, 11565, 16705] == list(rgb[0, 0, :])
+        assert [10280, 11565, 16705] == list(rgb[0, 320, :])
+        assert [10280, 11565, 16705] == list(rgb[0, 639, :])
+        assert [0, 0, 0] == list(rgb[240, 0, :])
+        assert [257, 257, 257] == list(rgb[240, 320, :])
+        assert [2313, 2313, 2313] == list(rgb[240, 639, :])
+        assert [10280, 11565, 16705] == list(rgb[479, 0, :])
+        assert [10280, 11565, 16705] == list(rgb[479, 320, :])
+        assert [10280, 11565, 16705] == list(rgb[479, 639, :])
+
+        assert (orig == arr).all()
+
+        # No endianness raises
+        ds._read_little = None
+        msg = (
+            "Unable to determine the endianness of the dataset, please set "
+            "an appropriate Transfer Syntax UID in 'FileDataset.file_meta'"
+        )
+        with pytest.raises(AttributeError, match=msg):
+            apply_color_lut(arr, ds)
+
+    def test_uint16_16_segmented_big(self):
+        """Test big endian uint16 Pixel Data with 16-bit LUT entries."""
+        ds = dcmread(PAL_SEG_BE_16_1F)
+        assert 16 == ds.BitsStored
+        assert 16 == ds.RedPaletteColorLookupTableDescriptor[2]
+        arr = ds.pixel_array
+        rgb = apply_color_lut(arr, ds)
+        assert (480, 640, 3) == rgb.shape
+        assert [10280, 11565, 16705] == list(rgb[0, 0, :])
+        assert [10280, 11565, 16705] == list(rgb[0, 320, :])
+        assert [10280, 11565, 16705] == list(rgb[0, 639, :])
+        assert [0, 0, 0] == list(rgb[240, 0, :])
+        assert [257, 257, 257] == list(rgb[240, 320, :])
+        assert [2313, 2313, 2313] == list(rgb[240, 639, :])
+        assert [10280, 11565, 16705] == list(rgb[479, 0, :])
+        assert [10280, 11565, 16705] == list(rgb[479, 320, :])
+        assert [10280, 11565, 16705] == list(rgb[479, 639, :])
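+
+    # Background for the descriptor-focused tests below: the Palette Color
+    # LUT Descriptors are [entries, first input value mapped, bits per
+    # entry] (PS3.3, C.7.6.3), so 8-bit entries may still be stored two
+    # bytes each, and inputs below the first mapped value clamp to LUT[0].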
+    def test_16_allocated_8_entries(self):
+        """Test LUT with 8-bit entries in 16 bits allocated."""
+        ds = dcmread(PAL_08_200_0_16_1F, force=True)
+        ds.file_meta = FileMetaDataset()
+        ds.file_meta.TransferSyntaxUID = ImplicitVRLittleEndian
+        ds.RedPaletteColorLookupTableDescriptor = [200, 0, 8]
+        lut = pack("<200H", *list(range(0, 200)))
+        assert 400 == len(lut)
+        ds.RedPaletteColorLookupTableData = lut
+        ds.GreenPaletteColorLookupTableData = lut
+        ds.BluePaletteColorLookupTableData = lut
+        arr = ds.pixel_array
+        assert (56, 149) == (arr.min(), arr.max())
+        out = apply_color_lut(arr, ds)
+        # Because the LUTs are mapped index to value (i.e. LUT[0] = 0,
+        # LUT[149] = 149), the output array should equal the input array
+        # but with three channels of identical values
+        assert np.array_equal(arr, out[:, :, 0])
+        assert np.array_equal(arr, out[:, :, 1])
+        assert np.array_equal(arr, out[:, :, 2])
+
+    def test_alpha(self):
+        """Test applying a color palette with an alpha channel."""
+        ds = dcmread(PAL_08_256_0_16_1F)
+        ds.AlphaPaletteColorLookupTableData = b"\x00\x80" * 256
+        arr = ds.pixel_array
+        rgba = apply_color_lut(arr, ds)
+        assert (600, 800, 4) == rgba.shape
+        assert 32768 == rgba[:, :, 3][0, 0]
+        assert (32768 == rgba[:, :, 3]).any()
+
+    def test_well_known_palette(self, disable_value_validation):
+        """Test using a well-known palette."""
+        ds = dcmread(PAL_08_256_0_16_1F)
+        # Drop it to 8-bit
+        arr = ds.pixel_array
+        rgb = apply_color_lut(arr, palette="PET")
+        line = rgb[68:88, 364, :]
+        ref = [
+            [249, 122, 12],
+            [255, 130, 4],
+            [255, 136, 16],
+            [255, 134, 12],
+            [253, 126, 4],
+            [239, 112, 32],
+            [211, 84, 88],
+            [197, 70, 116],
+            [177, 50, 156],
+            [168, 40, 176],
+            [173, 46, 164],
+            [185, 58, 140],
+            [207, 80, 96],
+            [209, 82, 92],
+            [189, 62, 132],
+            [173, 46, 164],
+            [168, 40, 176],
+            [162, 34, 188],
+            [162, 34, 188],
+            [154, 26, 204],
+        ]
+        assert np.array_equal(np.asarray(ref), line)
+        uid = apply_color_lut(arr, palette="1.2.840.10008.1.5.2")
+        assert np.array_equal(uid, rgb)
+
+    def test_first_map_positive(self):
+        """Test a positive first mapping value."""
+        ds = dcmread(PAL_08_200_0_16_1F, force=True)
+        ds.file_meta = FileMetaDataset()
+        ds.file_meta.TransferSyntaxUID = ImplicitVRLittleEndian
+        ds.RedPaletteColorLookupTableDescriptor[1] = 10
+        arr = ds.pixel_array
+        rgb = apply_color_lut(arr, ds)
+        # All IVs < 10 should be set to LUT[0]
+        # All IVs >= 10 should be shifted down 10 entries
+        # Original IV range is 56 to 149 -> 46 to 139
+        # LUT[88] -> LUT[78] = [33280, 56320, 65280]
+        # LUT[149] -> LUT[139] = [50944, 16384, 27904]
+        assert [33280, 56320, 65280] == list(rgb[arr == 88][0])
+        assert ([33280, 56320, 65280] == rgb[arr == 88]).all()
+        assert [50944, 16384, 27904] == list(rgb[arr == 149][0])
+        assert ([50944, 16384, 27904] == rgb[arr == 149]).all()
+
+    def test_first_map_negative(self):
+        """Test a negative first mapping value."""
+        ds = dcmread(PAL_08_200_0_16_1F, force=True)
+        ds.file_meta = FileMetaDataset()
+        ds.file_meta.TransferSyntaxUID = ImplicitVRLittleEndian
+        ds["RedPaletteColorLookupTableDescriptor"].VR = "SS"
+        ds.RedPaletteColorLookupTableDescriptor[1] = -10
+        arr = ds.pixel_array
+        rgb = apply_color_lut(arr, ds)
+        # All IVs < -10 should be set to LUT[0]
+        # All IVs >= -10 should be shifted up 10 entries
+        # Original IV range is 56 to 149 -> 66 to 159
+        # LUT[60] -> LUT[70] = [33280, 61952, 65280]
+        # LUT[130] -> LUT[140] = [60160, 25600, 37376]
+        assert [33280, 61952, 65280] == list(rgb[arr == 60][0])
+        assert ([33280, 61952, 65280] == rgb[arr == 60]).all()
+        assert [60160, 25600, 37376] == list(rgb[arr == 130][0])
+        assert ([60160, 25600, 37376] == rgb[arr == 130]).all()
+
+    def test_unchanged(self):
+        """Test that a dataset with no LUT raises an exception."""
+        # Regression test for #1068
+        ds = dcmread(MOD_16, force=True)
+        assert "RedPaletteColorLookupTableDescriptor" not in ds
+        msg = r"No suitable Palette Color Lookup Table Module found"
+        with pytest.raises(ValueError, match=msg):
+            apply_color_lut(ds.pixel_array, ds)
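+
+
+# Background for the segmented LUT tests below (PS3.3, Section C.7.9.2):
+# segmented palette data is a sequence of segments, each led by an opcode:
+# 0 is discrete (a literal run of values), 1 is linear (interpolate from
+# the previous value to an endpoint) and 2 is indirect (repeat earlier
+# segments); anything else is an unknown segment type.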
"""Test expanding a discrete segment.""" + data = (0, 1, 0) + assert [0] == _expand_segmented_lut(data, "H") + + data = (0, 2, 0, 112) + assert [0, 112] == _expand_segmented_lut(data, "H") + + data = (0, 2, 0, -112) + assert [0, -112] == _expand_segmented_lut(data, "H") + + data = (0, 2, 0, 112, 0, 0) + assert [0, 112] == _expand_segmented_lut(data, "H") + + data = (0, 2, 0, -112, 0, 0) + assert [0, -112] == _expand_segmented_lut(data, "H") + + def test_linear(self): + """Test expanding a linear segment.""" + # Linear can never be the first segment + # Positive slope + data = (0, 2, 0, 28672, 1, 5, 49152) + out = _expand_segmented_lut(data, "H") + assert [0, 28672, 32768, 36864, 40960, 45056, 49152] == out + + data = (0, 1, -400, 1, 5, 0) + out = _expand_segmented_lut(data, "H") + assert [-400, -320, -240, -160, -80, 0] == out + + # Positive slope, floating point steps + data = (0, 1, 163, 1, 48, 255) + out = _expand_segmented_lut(data, "H") + assert (1 + 48) == len(out) + + # No slope + data = (0, 2, 0, 28672, 1, 5, 28672) + out = _expand_segmented_lut(data, "H") + assert [0, 28672, 28672, 28672, 28672, 28672, 28672] == out + + data = (0, 1, -100, 1, 5, -100) + out = _expand_segmented_lut(data, "H") + assert [-100, -100, -100, -100, -100, -100] == out + + # Negative slope + data = (0, 2, 0, 49152, 1, 5, 28672) + out = _expand_segmented_lut(data, "H") + assert [0, 49152, 45056, 40960, 36864, 32768, 28672] == out + + data = (0, 1, 0, 1, 5, -400) + out = _expand_segmented_lut(data, "H") + assert [0, -80, -160, -240, -320, -400] == out + + def test_indirect_08(self): + """Test expanding an indirect segment encoded as 8-bit.""" + # No real world test data available for this + # LSB, MSB + ref_a = [0, 112, 128, 144, 160, 176, 192, 192, 192, 192, 192, 192] + + # Little endian + data = (0, 2, 0, 112, 1, 5, 192, 2, 1, 4, 0, 0, 0) + out = _expand_segmented_lut(data, "B") + assert ref_a == out + + data = (0, 2, 0, 112, 2, 1, 0, 0, 0, 0) + out = _expand_segmented_lut(data, ">B") + assert [0, 112, 0, 112] == out + + # 0x0001 0x0203 is 66051 in BE 16-bit MSB, LSB + data = [0, 1, 0] * 22017 + [0, 2, 1, 2] + [2, 1, 2, 3, 0, 1] + out = _expand_segmented_lut(data, ">B") + assert [0] * 22017 + [1, 2, 1, 2] == out + + def test_indirect_16(self): + """Test expanding an indirect segment encoded as 16-bit.""" + # Start from a discrete segment + data = (0, 2, 0, 112, 1, 5, 192, 2, 2, 0, 0) + out = _expand_segmented_lut(data, "H") + assert [0, 112, 128, 144, 160, 176, 192] * 2 == out + + # Start from a linear segment + data = (0, 2, 0, 112, 1, 5, 192, 2, 1, 4, 0) + out = _expand_segmented_lut(data, "H") + assert [0, 112, 128, 144, 160, 176, 192, 192, 192, 192, 192, 192] == out + + def test_palettes_spring(self): + """Test expanding the SPRING palette.""" + ds = dcmread(get_palette_files("spring.dcm")[0]) + + bs = ds.SegmentedRedPaletteColorLookupTableData + fmt = f"<{len(bs)}B" + data = unpack(fmt, bs) + out = _expand_segmented_lut(data, fmt) + assert [255] * 256 == out + + bs = ds.SegmentedGreenPaletteColorLookupTableData + fmt = f"<{len(bs)}B" + data = unpack(fmt, bs) + out = _expand_segmented_lut(data, fmt) + assert list(range(0, 256)) == out + + bs = ds.SegmentedBluePaletteColorLookupTableData + fmt = f"<{len(bs)}B" + data = unpack(fmt, bs) + out = _expand_segmented_lut(data, fmt) + assert list(range(255, -1, -1)) == out + + def test_palettes_summer(self): + """Test expanding the SUMMER palette.""" + ds = dcmread(get_palette_files("summer.dcm")[0]) + + bs = ds.SegmentedRedPaletteColorLookupTableData + fmt = 
f"<{len(bs)}B" + data = unpack(fmt, bs) + out = _expand_segmented_lut(data, fmt) + assert [0] * 256 == out + + bs = ds.SegmentedGreenPaletteColorLookupTableData + fmt = f"<{len(bs)}B" + data = unpack(fmt, bs) + out = _expand_segmented_lut(data, fmt) + assert [255, 255, 254, 254, 253] == out[:5] + assert [130, 129, 129, 128, 128] == out[-5:] + + bs = ds.SegmentedBluePaletteColorLookupTableData + fmt = f"<{len(bs)}B" + data = unpack(fmt, bs) + out = _expand_segmented_lut(data, fmt) + assert [0] * 128 == out[:128] + assert [246, 248, 250, 252, 254] == out[-5:] + + def test_palettes_fall(self): + """Test expanding the FALL palette.""" + ds = dcmread(get_palette_files("fall.dcm")[0]) + + bs = ds.SegmentedRedPaletteColorLookupTableData + fmt = f"<{len(bs)}B" + data = unpack(fmt, bs) + out = _expand_segmented_lut(data, fmt) + assert [255] * 256 == out + + bs = ds.SegmentedGreenPaletteColorLookupTableData + fmt = f"<{len(bs)}B" + data = unpack(fmt, bs) + out = _expand_segmented_lut(data, fmt) + assert list(range(255, -1, -1)) == out + + bs = ds.SegmentedBluePaletteColorLookupTableData + fmt = f"<{len(bs)}B" + data = unpack(fmt, bs) + out = _expand_segmented_lut(data, fmt) + assert [0] * 256 == out + + def test_palettes_winter(self): + """Test expanding the WINTER palette.""" + ds = dcmread(get_palette_files("winter.dcm")[0]) + + bs = ds.SegmentedRedPaletteColorLookupTableData + fmt = f"<{len(bs)}B" + data = unpack(fmt, bs) + out = _expand_segmented_lut(data, fmt) + assert [0] * 128 == out[:128] + assert [123, 124, 125, 126, 127] == out[-5:] + + bs = ds.SegmentedGreenPaletteColorLookupTableData + fmt = f"<{len(bs)}B" + data = unpack(fmt, bs) + out = _expand_segmented_lut(data, fmt) + assert list(range(0, 256)) == out + + bs = ds.SegmentedBluePaletteColorLookupTableData + fmt = f"<{len(bs)}B" + data = unpack(fmt, bs) + out = _expand_segmented_lut(data, fmt) + assert [255, 255, 254, 254, 253] == out[:5] + assert [130, 129, 129, 128, 128] == out[-5:] + + def test_first_linear_raises(self): + """Test having a linear segment first raises exception.""" + data = (1, 5, 49152) + msg = ( + r"Error expanding a segmented palette color lookup table: " + r"the first segment cannot be a linear segment" + ) + with pytest.raises(ValueError, match=msg): + _expand_segmented_lut(data, "H") + + def test_first_indirect_raises(self): + """Test having a linear segment first raises exception.""" + data = (2, 5, 2, 0) + msg = ( + r"Error expanding a segmented palette color lookup table: " + r"the first segment cannot be an indirect segment" + ) + with pytest.raises(ValueError, match=msg): + _expand_segmented_lut(data, "H") + + def test_unknown_opcode_raises(self): + """Test having an unknown opcode raises exception.""" + data = (3, 5, 49152) + msg = ( + r"Error expanding a segmented palette lookup table: " + r"unknown segment type '3'" + ) + with pytest.raises(ValueError, match=msg): + _expand_segmented_lut(data, "H") + + +@pytest.mark.skipif(not HAVE_NP, reason="Numpy is not available") +class TestApplyWindowing: + """Tests for apply_windowing().""" + + def test_window_single_view(self): + """Test windowing with a single view.""" + # 12-bit unsigned + ds = dcmread(WIN_12_1F) + assert 16 == ds.BitsAllocated + assert 12 == ds.BitsStored + assert 0 == ds.PixelRepresentation + ds.WindowCenter = 450 + ds.WindowWidth = 790 + assert 450 == ds.WindowCenter + assert 790 == ds.WindowWidth + + arr = ds.pixel_array + assert 642 == arr[326, 130] + out = apply_windowing(arr, ds) + assert 3046.6 == pytest.approx(out[326, 130], abs=0.1) 
+@pytest.mark.skipif(not HAVE_NP, reason="Numpy is not available")
+class TestApplyWindowing:
+    """Tests for apply_windowing()."""
+
+    def test_window_single_view(self):
+        """Test windowing with a single view."""
+        # 12-bit unsigned
+        ds = dcmread(WIN_12_1F)
+        assert 16 == ds.BitsAllocated
+        assert 12 == ds.BitsStored
+        assert 0 == ds.PixelRepresentation
+        ds.WindowCenter = 450
+        ds.WindowWidth = 790
+        assert 450 == ds.WindowCenter
+        assert 790 == ds.WindowWidth
+
+        arr = ds.pixel_array
+        assert 642 == arr[326, 130]
+        out = apply_windowing(arr, ds)
+        assert 3046.6 == pytest.approx(out[326, 130], abs=0.1)
+
+    def test_window_multi_view(self):
+        """Test windowing with multiple views."""
+        ds = dcmread(WIN_12_1F)
+        assert 16 == ds.BitsAllocated
+        assert 12 == ds.BitsStored
+        assert 0 == ds.PixelRepresentation
+        if HAVE_NP and config.use_DS_numpy:
+            expected = np.array([450, 200])
+            assert np.allclose(ds.WindowCenter, expected)
+            expected = np.array([790, 443])
+            assert np.allclose(ds.WindowWidth, expected)
+        else:
+            assert [450, 200] == ds.WindowCenter
+            assert [790, 443] == ds.WindowWidth
+
+        arr = ds.pixel_array
+        assert 642 == arr[326, 130]
+        out = apply_windowing(arr, ds)
+        assert 3046.6 == pytest.approx(out[326, 130], abs=0.1)
+        out = apply_windowing(arr, ds, index=1)
+        assert 4095.0 == pytest.approx(out[326, 130], abs=0.1)
+
+    def test_window_uint8(self):
+        """Test windowing an 8-bit unsigned array."""
+        ds = Dataset()
+        ds.PhotometricInterpretation = "MONOCHROME1"
+        ds.PixelRepresentation = 0
+        ds.BitsStored = 8
+        arr = np.asarray([0, 1, 128, 254, 255], dtype="uint8")
+
+        # Linear
+        ds.WindowWidth = 1
+        ds.WindowCenter = 0
+        assert [255, 255, 255, 255, 255] == apply_windowing(arr, ds).tolist()
+
+        ds.WindowWidth = 128
+        ds.WindowCenter = 254
+        assert [0, 0, 0, 128.5, 130.5] == pytest.approx(
+            apply_windowing(arr, ds).tolist(), abs=0.1
+        )
+
+        # Linear exact
+        ds.VOILUTFunction = "LINEAR_EXACT"
+        assert [0, 0, 0, 127.5, 129.5] == pytest.approx(
+            apply_windowing(arr, ds).tolist(), abs=0.1
+        )
+
+        # Sigmoid
+        ds.VOILUTFunction = "SIGMOID"
+        assert [0.1, 0.1, 4.9, 127.5, 129.5] == pytest.approx(
+            apply_windowing(arr, ds).tolist(), abs=0.1
+        )
+
+    def test_window_uint16(self):
+        """Test windowing a 16-bit unsigned array."""
+        ds = Dataset()
+        ds.PhotometricInterpretation = "MONOCHROME1"
+        ds.PixelRepresentation = 0
+        ds.BitsStored = 16
+        arr = np.asarray([0, 1, 32768, 65534, 65535], dtype="uint16")
+
+        ds.WindowWidth = 1
+        ds.WindowCenter = 0
+        assert [65535] * 5 == apply_windowing(arr, ds).tolist()
+
+        ds.WindowWidth = 32768
+        ds.WindowCenter = 254
+        assert [32260.5, 32262.5, 65535, 65535, 65535] == pytest.approx(
+            apply_windowing(arr, ds).tolist(), abs=0.1
+        )
+
+        ds.VOILUTFunction = "LINEAR_EXACT"
+        assert [32259.5, 32261.5, 65535, 65535, 65535] == pytest.approx(
+            apply_windowing(arr, ds).tolist(), abs=0.1
+        )
+
+        ds.VOILUTFunction = "SIGMOID"
+        assert [32259.5, 32261.5, 64319.8, 65512.3, 65512.3] == pytest.approx(
+            apply_windowing(arr, ds).tolist(), abs=0.1
+        )
+
+    def test_window_uint32(self):
+        """Test windowing a 32-bit unsigned array."""
+        ds = Dataset()
+        ds.PhotometricInterpretation = "MONOCHROME1"
+        ds.PixelRepresentation = 0
+        ds.BitsStored = 32
+        y_max = 2**32 - 1
+        arr = np.asarray([0, 1, 2**31, y_max - 1, y_max], dtype="uint32")
+
+        ds.WindowWidth = 1
+        ds.WindowCenter = 0
+        assert [y_max] * 5 == apply_windowing(arr, ds).tolist()
+
+        ds.WindowWidth = 342423423423
+        ds.WindowCenter = 757336
+        assert [
+            2147474148.4,
+            2147474148.4,
+            2174409724,
+            2201345299.7,
+            2201345299.7,
+        ] == pytest.approx(apply_windowing(arr, ds).tolist(), abs=0.1)
+
+        ds.VOILUTFunction = "LINEAR_EXACT"
+        assert [
+            2147474148.3,
+            2147474148.4,
+            2174409724,
+            2201345299.7,
+            2201345299.7,
+        ] == pytest.approx(apply_windowing(arr, ds).tolist(), abs=0.1)
+
+        ds.VOILUTFunction = "SIGMOID"
+        assert [
+            2147474148.3,
+            2147474148.4,
+            2174408313.1,
+            2201334008.2,
+            2201334008.3,
+        ] == pytest.approx(apply_windowing(arr, ds).tolist(), abs=0.1)
+
+    def test_window_int8(self):
+        """Test windowing an 8-bit signed array."""
+        ds = Dataset()
+        ds.PhotometricInterpretation = "MONOCHROME1"
+        ds.PixelRepresentation = 1
+        ds.BitsStored = 8
+        arr = np.asarray([-128, -127, -1, 0, 1, 126, 127], dtype="int8")
+
+        # Linear
+        ds.WindowWidth = 1
+        ds.WindowCenter = 0
+        assert [-128, -128, -128, 127, 127, 127, 127] == pytest.approx(
+            apply_windowing(arr, ds).tolist()
+        )
+
+        ds.WindowWidth = 128
+        ds.WindowCenter = -5
+        assert [-128, -128, 8.5, 10.5, 12.6, 127, 127] == pytest.approx(
+            apply_windowing(arr, ds).tolist(), abs=0.1
+        )
+
+        # Linear exact
+        ds.VOILUTFunction = "LINEAR_EXACT"
+        assert [-128, -128, 7.5, 9.5, 11.5, 127, 127] == pytest.approx(
+            apply_windowing(arr, ds).tolist(), abs=0.1
+        )
+
+        # Sigmoid
+        ds.VOILUTFunction = "SIGMOID"
+        assert [-122.7, -122.5, 7.5, 9.4, 11.4, 122.8, 122.9] == pytest.approx(
+            apply_windowing(arr, ds).tolist(), abs=0.1
+        )
+
+    def test_window_int16(self):
+        """Test windowing a 16-bit signed array."""
+        ds = Dataset()
+        ds.PhotometricInterpretation = "MONOCHROME1"
+        ds.PixelRepresentation = 1
+        ds.BitsStored = 16
+        arr = np.asarray([-128, -127, -1, 0, 1, 126, 127], dtype="int16")
+
+        # Linear
+        ds.WindowWidth = 1
+        ds.WindowCenter = 0
+        assert [-32768, -32768, -32768, 32767, 32767, 32767, 32767] == pytest.approx(
+            apply_windowing(arr, ds).tolist(), abs=0.1
+        )
+
+        ds.WindowWidth = 128
+        ds.WindowCenter = -5
+        assert [-32768, -32768, 2321.6, 2837.6, 3353.7, 32767, 32767] == pytest.approx(
+            apply_windowing(arr, ds).tolist(), abs=0.1
+        )
+
+        # Linear exact
+        ds.VOILUTFunction = "LINEAR_EXACT"
+        assert [-32768, -32768, 2047.5, 2559.5, 3071.5, 32767, 32767] == pytest.approx(
+            apply_windowing(arr, ds).tolist(), abs=0.1
+        )
+
+        # Sigmoid
+        ds.VOILUTFunction = "SIGMOID"
+        assert [
+            -31394.1,
+            -31351.4,
+            2044.8,
+            2554.3,
+            3062.5,
+            31692,
+            31724.6,
+        ] == pytest.approx(apply_windowing(arr, ds).tolist(), abs=0.1)
+
+    def test_window_int32(self):
+        """Test windowing a 32-bit signed array."""
+        ds = Dataset()
+        ds.PhotometricInterpretation = "MONOCHROME1"
+        ds.PixelRepresentation = 1
+        ds.BitsStored = 32
+        arr = np.asarray([-128, -127, -1, 0, 1, 126, 127], dtype="int32")
+
+        # Linear
+        ds.WindowWidth = 1
+        ds.WindowCenter = 0
+        assert [
+            -(2**31),
+            -(2**31),
+            -(2**31),
+            2**31 - 1,
+            2**31 - 1,
+            2**31 - 1,
+            2**31 - 1,
+        ] == pytest.approx(apply_windowing(arr, ds).tolist(), abs=0.1)
+
+        ds.WindowWidth = 128
+        ds.WindowCenter = -5
+        assert [
+            -2147483648,
+            -2147483648,
+            152183880,
+            186002520.1,
+            219821160.3,
+            2147483647,
+            2147483647,
+        ] == pytest.approx(apply_windowing(arr, ds).tolist(), abs=0.1)
+
+        # Linear exact
+        ds.VOILUTFunction = "LINEAR_EXACT"
+        assert [
+            -2147483648,
+            -2147483648,
+            134217727.5,
+            167772159.5,
+            201326591.5,
+            2147483647,
+            2147483647,
+        ] == pytest.approx(apply_windowing(arr, ds).tolist(), abs=0.1)
+
+        # Sigmoid
+        ds.VOILUTFunction = "SIGMOID"
+        assert [
+            -2057442919.3,
+            -2054646500.7,
+            134043237.4,
+            167431657.4,
+            200738833.7,
+            2077033158.8,
+            2079166214.8,
+        ] == pytest.approx(apply_windowing(arr, ds).tolist(), abs=0.1)
+
+    def test_window_multi_frame(self):
+        """Test windowing with multiple frames."""
+        ds = dcmread(WIN_12_1F)
+        assert 16 == ds.BitsAllocated
+        assert 12 == ds.BitsStored
+        assert 0 == ds.PixelRepresentation
+        ds.WindowCenter = 450
+        ds.WindowWidth = 790
+        assert 450 == ds.WindowCenter
+        assert 790 == ds.WindowWidth
+
+        arr = ds.pixel_array
+        arr = np.stack([arr, 4095 - arr])
+        assert (2, 484, 484) == arr.shape
+        assert 642 == arr[0, 326, 130]
+        assert 3453 == arr[1, 326, 130]
+        out = apply_windowing(arr, ds)
+        assert 3046.6 == pytest.approx(out[0, 326, 130], abs=0.1)
+        assert 4095.0 == pytest.approx(out[1, 326, 130], abs=0.1)
+
+    def test_window_rescale(self):
+        """Test windowing after a rescale operation."""
+        ds = dcmread(WIN_12_1F)
+        assert 16 == ds.BitsAllocated
+        assert 12 == ds.BitsStored
+        assert 0 == ds.PixelRepresentation
+        if HAVE_NP and config.use_DS_numpy:
+            expected = np.array([450, 200])
+            assert np.allclose(ds.WindowCenter, expected)
+            expected = np.array([790, 443])
+            assert np.allclose(ds.WindowWidth, expected)
+        else:
+            assert [450, 200] == ds.WindowCenter
+            assert [790, 443] == ds.WindowWidth
+        ds.RescaleSlope = 1.2
+        ds.RescaleIntercept = 0
+
+        arr = ds.pixel_array
+        assert 0 == arr[16, 60]
+        assert 642 == arr[326, 130]
+        assert 1123 == arr[316, 481]
+        hu = apply_modality_lut(arr, ds)
+        assert 0 == hu[16, 60]
+        assert 770.4 == hu[326, 130]
+        assert 1347.6 == hu[316, 481]
+        # With rescale -> output range is 0 to 4914
+        out = apply_windowing(hu, ds)
+        assert 0 == pytest.approx(out[16, 60], abs=0.1)
+        assert 4455.6 == pytest.approx(out[326, 130], abs=0.1)
+        assert 4914.0 == pytest.approx(out[316, 481], abs=0.1)
+
+    def test_window_modality_lut(self):
+        """Test windowing after a modality LUT operation."""
+        ds = dcmread(MOD_16_SEQ)
+        ds.WindowCenter = [49147, 200]
+        ds.WindowWidth = [790, 443]
+        assert 16 == ds.BitsAllocated
+        assert 12 == ds.BitsStored
+        assert 1 == ds.PixelRepresentation  # Signed
+        assert "RescaleSlope" not in ds
+        assert "ModalityLUTSequence" in ds
+
+        seq = ds.ModalityLUTSequence[0]
+        assert [4096, -2048, 16] == seq.LUTDescriptor
+        arr = ds.pixel_array
+        assert -2048 == arr.min()
+        assert 4095 == arr.max()
+
+        arr = ds.pixel_array
+        assert 2047 == arr[16, 60]
+        assert 1023 == arr[0, 1]
+        hu = apply_modality_lut(arr, ds)
+        assert 65535 == hu[16, 60]
+        assert 49147 == hu[0, 1]
+        out = apply_windowing(hu, ds)
+        assert 65535.0 == pytest.approx(out[16, 60], abs=0.1)
+        assert 32809.0 == pytest.approx(out[0, 1], abs=0.1)
+        # Output range must be 0 to 2**16 - 1
+        assert 65535 == out.max()
+        assert 0 == out.min()
+
+    def test_window_bad_photometric_interp(self):
+        """Test bad photometric interpretation raises exception."""
+        ds = dcmread(WIN_12_1F)
+        ds.PhotometricInterpretation = "RGB"
+        msg = r"only 'MONOCHROME1' and 'MONOCHROME2' are allowed"
+        with pytest.raises(ValueError, match=msg):
+            apply_windowing(ds.pixel_array, ds)
+
+    def test_window_bad_parameters(self):
+        """Test bad windowing parameters raise exceptions."""
+        ds = dcmread(WIN_12_1F)
+        ds.WindowWidth = 0
+        ds.VOILUTFunction = "LINEAR"
+        msg = r"Width must be greater than or equal to 1"
+        with pytest.raises(ValueError, match=msg):
+            apply_windowing(ds.pixel_array, ds)
+
+        ds.VOILUTFunction = "LINEAR_EXACT"
+        msg = r"Width must be greater than 0"
+        with pytest.raises(ValueError, match=msg):
+            apply_windowing(ds.pixel_array, ds)
+
+        ds.VOILUTFunction = "SIGMOID"
+        msg = r"Width must be greater than 0"
+        with pytest.raises(ValueError, match=msg):
+            apply_windowing(ds.pixel_array, ds)
+
+        ds.VOILUTFunction = "UNKNOWN"
+        msg = r"Unsupported \(0028,1056\) VOI LUT Function value 'UNKNOWN'"
+        with pytest.raises(ValueError, match=msg):
+            apply_windowing(ds.pixel_array, ds)
+
+    def test_window_bad_index(self, no_numpy_use):
+        """Test windowing with a bad view index."""
+        ds = dcmread(WIN_12_1F)
+        assert 2 == len(ds.WindowWidth)
+        arr = ds.pixel_array
+        with pytest.raises(IndexError, match=r"list index out of range"):
+            apply_windowing(arr, ds, index=2)
= "MONOCHROME1" + ds.PixelRepresentation = 1 + ds.BitsStored = 8 + arr = np.asarray([-128, -127, -1, 0, 1, 126, 127], dtype="int8") + out = apply_windowing(arr, ds) + assert [-128, -127, -1, 0, 1, 126, 127] == out.tolist() + + ds.ModalityLUTSequence = [] + out = apply_windowing(arr, ds) + assert [-128, -127, -1, 0, 1, 126, 127] == out.tolist() + + def test_rescale_empty(self): + """Test RescaleSlope and RescaleIntercept being empty.""" + ds = dcmread(WIN_12_1F) + ds.RescaleSlope = None + ds.RescaleIntercept = None + + arr = ds.pixel_array + assert 0 == arr[16, 60] + assert 642 == arr[326, 130] + assert 1123 == arr[316, 481] + out = apply_windowing(arr, ds) + assert 0 == pytest.approx(out[16, 60], abs=0.1) + assert 3046.6 == pytest.approx(out[326, 130], abs=0.1) + assert 4095.0 == pytest.approx(out[316, 481], abs=0.1) + + +@pytest.mark.skipif(not HAVE_NP, reason="Numpy is not available") +class TestApplyVOI: + """Tests for apply_voi().""" + + def test_voi_single_view(self): + """Test VOI LUT with a single view.""" + ds = dcmread(VOI_08_1F) + assert 8 == ds.BitsAllocated + assert 8 == ds.BitsStored + assert 0 == ds.PixelRepresentation + item = ds.VOILUTSequence[0] + assert [256, 0, 16] == item.LUTDescriptor + lut = item.LUTData + assert 0 == lut[0] + assert 19532 == lut[76] + assert 45746 == lut[178] + assert 65535 == lut[255] + + arr = ds.pixel_array + assert 0 == arr[387, 448] + assert 76 == arr[178, 126] + assert 178 == arr[186, 389] + assert 255 == arr[129, 79] + + out = apply_voi(arr, ds) + assert 0 == out[387, 448] + assert 19532 == out[178, 126] + assert 45746 == out[186, 389] + assert 65535 == out[129, 79] + + def test_voi_multi_view(self): + """Test VOI LUT with multiple views.""" + ds = dcmread(VOI_08_1F) + assert 8 == ds.BitsAllocated + assert 8 == ds.BitsStored + assert 0 == ds.PixelRepresentation + item0 = ds.VOILUTSequence[0] + # Add another view that's the inverse + ds.VOILUTSequence.append(Dataset()) + item1 = ds.VOILUTSequence[1] + item1.LUTDescriptor = [256, 0, 16] + item1.LUTData = item0.LUTData[::-1] + + arr = ds.pixel_array + assert 0 == arr[387, 448] + assert 76 == arr[178, 126] + assert 178 == arr[186, 389] + assert 255 == arr[129, 79] + + out0 = apply_voi(arr, ds) + assert 0 == out0[387, 448] + assert 19532 == out0[178, 126] + assert 45746 == out0[186, 389] + assert 65535 == out0[129, 79] + + out1 = apply_voi(arr, ds, index=1) + assert 65535 == out1[387, 448] + assert 46003 == out1[178, 126] + assert 19789 == out1[186, 389] + assert 0 == out1[129, 79] + + def test_voi_multi_frame(self): + """Test VOI with a multiple frames.""" + ds = dcmread(VOI_08_1F) + assert 8 == ds.BitsAllocated + assert 8 == ds.BitsStored + assert 0 == ds.PixelRepresentation + + arr = ds.pixel_array + arr = np.stack([arr, 255 - arr]) + assert (2, 512, 512) == arr.shape + + out = apply_voi(arr, ds) + assert 0 == out[0, 387, 448] + assert 19532 == out[0, 178, 126] + assert 45746 == out[0, 186, 389] + assert 65535 == out[0, 129, 79] + assert 65535 == out[1, 387, 448] + assert 46003 == out[1, 178, 126] + assert 19789 == out[1, 186, 389] + assert 0 == out[1, 129, 79] + + def test_voi_zero_entries(self): + """Test that 0 entries is interpreted correctly.""" + ds = dcmread(VOI_08_1F) + seq = ds.VOILUTSequence[0] + seq.LUTDescriptor = [0, 0, 16] + assert 256 == len(seq.LUTData) + arr = np.asarray([0, 255, 256, 65535]) + msg = r"index 256 is out of bounds" + with pytest.raises(IndexError, match=msg): + apply_voi(arr, ds) + + # LUTData with 65536 entries + seq.LUTData = [0] * 65535 + [1] + out = 
+@pytest.mark.skipif(not HAVE_NP, reason="Numpy is not available")
+class TestApplyVOI:
+    """Tests for apply_voi()."""
+
+    def test_voi_single_view(self):
+        """Test VOI LUT with a single view."""
+        ds = dcmread(VOI_08_1F)
+        assert 8 == ds.BitsAllocated
+        assert 8 == ds.BitsStored
+        assert 0 == ds.PixelRepresentation
+        item = ds.VOILUTSequence[0]
+        assert [256, 0, 16] == item.LUTDescriptor
+        lut = item.LUTData
+        assert 0 == lut[0]
+        assert 19532 == lut[76]
+        assert 45746 == lut[178]
+        assert 65535 == lut[255]
+
+        arr = ds.pixel_array
+        assert 0 == arr[387, 448]
+        assert 76 == arr[178, 126]
+        assert 178 == arr[186, 389]
+        assert 255 == arr[129, 79]
+
+        out = apply_voi(arr, ds)
+        assert 0 == out[387, 448]
+        assert 19532 == out[178, 126]
+        assert 45746 == out[186, 389]
+        assert 65535 == out[129, 79]
+
+    def test_voi_multi_view(self):
+        """Test VOI LUT with multiple views."""
+        ds = dcmread(VOI_08_1F)
+        assert 8 == ds.BitsAllocated
+        assert 8 == ds.BitsStored
+        assert 0 == ds.PixelRepresentation
+        item0 = ds.VOILUTSequence[0]
+        # Add another view that's the inverse
+        ds.VOILUTSequence.append(Dataset())
+        item1 = ds.VOILUTSequence[1]
+        item1.LUTDescriptor = [256, 0, 16]
+        item1.LUTData = item0.LUTData[::-1]
+
+        arr = ds.pixel_array
+        assert 0 == arr[387, 448]
+        assert 76 == arr[178, 126]
+        assert 178 == arr[186, 389]
+        assert 255 == arr[129, 79]
+
+        out0 = apply_voi(arr, ds)
+        assert 0 == out0[387, 448]
+        assert 19532 == out0[178, 126]
+        assert 45746 == out0[186, 389]
+        assert 65535 == out0[129, 79]
+
+        out1 = apply_voi(arr, ds, index=1)
+        assert 65535 == out1[387, 448]
+        assert 46003 == out1[178, 126]
+        assert 19789 == out1[186, 389]
+        assert 0 == out1[129, 79]
+
+    def test_voi_multi_frame(self):
+        """Test VOI with multiple frames."""
+        ds = dcmread(VOI_08_1F)
+        assert 8 == ds.BitsAllocated
+        assert 8 == ds.BitsStored
+        assert 0 == ds.PixelRepresentation
+
+        arr = ds.pixel_array
+        arr = np.stack([arr, 255 - arr])
+        assert (2, 512, 512) == arr.shape
+
+        out = apply_voi(arr, ds)
+        assert 0 == out[0, 387, 448]
+        assert 19532 == out[0, 178, 126]
+        assert 45746 == out[0, 186, 389]
+        assert 65535 == out[0, 129, 79]
+        assert 65535 == out[1, 387, 448]
+        assert 46003 == out[1, 178, 126]
+        assert 19789 == out[1, 186, 389]
+        assert 0 == out[1, 129, 79]
+
+    def test_voi_zero_entries(self):
+        """Test that 0 entries is interpreted correctly."""
+        ds = dcmread(VOI_08_1F)
+        seq = ds.VOILUTSequence[0]
+        seq.LUTDescriptor = [0, 0, 16]
+        assert 256 == len(seq.LUTData)
+        arr = np.asarray([0, 255, 256, 65535])
+        msg = r"index 256 is out of bounds"
+        with pytest.raises(IndexError, match=msg):
+            apply_voi(arr, ds)
+
+        # LUTData with 65536 entries
+        seq.LUTData = [0] * 65535 + [1]
+        out = apply_voi(arr, ds)
+        assert [0, 0, 0, 1] == list(out)
+
+    def test_voi_uint8(self):
+        """Test uint VOI LUT with an 8-bit LUT."""
+        ds = Dataset()
+        ds.PixelRepresentation = 0
+        ds.BitsStored = 8
+        ds.VOILUTSequence = [Dataset()]
+        item = ds.VOILUTSequence[0]
+        item.LUTDescriptor = [4, 0, 8]
+        item.LUTData = [0, 127, 128, 255]
+        arr = np.asarray([0, 1, 128, 254, 255], dtype="uint8")
+        out = apply_voi(arr, ds)
+        assert "uint8" == out.dtype
+        assert [0, 127, 255, 255, 255] == out.tolist()
+
+    def test_voi_uint16(self):
+        """Test uint VOI LUT with a 16-bit LUT."""
+        ds = Dataset()
+        ds.PixelRepresentation = 0
+        ds.BitsStored = 16
+        ds.VOILUTSequence = [Dataset()]
+        item = ds.VOILUTSequence[0]
+        item.LUTDescriptor = [4, 0, 16]
+        item.LUTData = [0, 127, 32768, 65535]
+        arr = np.asarray([0, 1, 2, 3, 255], dtype="uint16")
+        out = apply_voi(arr, ds)
+        assert "uint16" == out.dtype
+        assert [0, 127, 32768, 65535, 65535] == out.tolist()
+
+    def test_voi_int8(self):
+        """Test int VOI LUT with an 8-bit LUT."""
+        ds = Dataset()
+        ds.PixelRepresentation = 1
+        ds.BitsStored = 8
+        ds.VOILUTSequence = [Dataset()]
+        item = ds.VOILUTSequence[0]
+        item.LUTDescriptor = [4, 0, 8]
+        item.LUTData = [0, 127, 128, 255]
+        arr = np.asarray([0, -1, 2, -128, 127], dtype="int8")
+        out = apply_voi(arr, ds)
+        assert "uint8" == out.dtype
+        assert [0, 0, 128, 0, 255] == out.tolist()
+
+    def test_voi_int16(self):
+        """Test int VOI LUT with a 16-bit LUT."""
+        ds = Dataset()
+        ds.PixelRepresentation = 0
+        ds.BitsStored = 16
+        ds.VOILUTSequence = [Dataset()]
+        item = ds.VOILUTSequence[0]
+        item.LUTDescriptor = [4, 0, 16]
+        item.LUTData = [0, 127, 32768, 65535]
+        arr = np.asarray([0, -1, 2, -128, 255], dtype="int16")
+        out = apply_voi(arr, ds)
+        assert "uint16" == out.dtype
+        assert [0, 0, 32768, 0, 65535] == out.tolist()
+
+    def test_voi_bad_depth(self):
+        """Test bad LUT depth raises exception."""
+        ds = dcmread(VOI_08_1F)
+        item = ds.VOILUTSequence[0]
+        item.LUTDescriptor[2] = 7
+        msg = r"'7' bits per LUT entry is not supported"
+        with pytest.raises(NotImplementedError, match=msg):
+            apply_voi(ds.pixel_array, ds)
+
+        item.LUTDescriptor[2] = 17
+        msg = r"'17' bits per LUT entry is not supported"
+        with pytest.raises(NotImplementedError, match=msg):
+            apply_voi(ds.pixel_array, ds)
+
+    def test_voi_uint16_array_float(self):
+        """Test warning when array is float and VOI LUT with a 16-bit LUT"""
+        ds = Dataset()
+        ds.PixelRepresentation = 0
+        ds.BitsStored = 16
+        ds.VOILUTSequence = [Dataset()]
+        item = ds.VOILUTSequence[0]
+        item.LUTDescriptor = [4, 0, 16]
+        item.LUTData = [0, 127, 32768, 65535]
+        arr = np.asarray([0, 1, 2, 3, 255], dtype="float64")
+        msg = r"Applying a VOI LUT on a float input array may give incorrect results"
+
+        with pytest.warns(UserWarning, match=msg):
+            out = apply_voi(arr, ds)
+            assert [0, 127, 32768, 65535, 65535] == out.tolist()
+
+    def test_unchanged(self):
+        """Test input array is unchanged if no VOI LUT"""
+        ds = Dataset()
+        ds.PhotometricInterpretation = "MONOCHROME1"
+        ds.PixelRepresentation = 1
+        ds.BitsStored = 8
+        arr = np.asarray([-128, -127, -1, 0, 1, 126, 127], dtype="int8")
+        out = apply_voi(arr, ds)
+        assert [-128, -127, -1, 0, 1, 126, 127] == out.tolist()
+
+        ds.VOILUTSequence = []
+        out = apply_voi(arr, ds)
+        assert [-128, -127, -1, 0, 1, 126, 127] == out.tolist()
+
+    def test_voi_lutdata_ow(self):
+        """Test LUT Data with VR OW."""
+        ds = Dataset()
+        ds.set_original_encoding(False, True)
+        ds.PixelRepresentation = 0
+        ds.BitsStored = 16
+        ds.VOILUTSequence = [Dataset()]
+        item = ds.VOILUTSequence[0]
+        item.LUTDescriptor = [4, 0, 16]
+        item.LUTData = [0, 127, 32768, 65535]
+        item.LUTData = pack("<4H", *item.LUTData)
+        item["LUTData"].VR = "OW"
+        arr = np.asarray([0, 1, 2, 3, 255], dtype="uint16")
+        out = apply_voi(arr, ds)
+        assert "uint16" == out.dtype
+        assert [0, 127, 32768, 65535, 65535] == out.tolist()
+
+    def test_file_meta(self):
+        """Test using file meta to determine endianness"""
+        ds = Dataset()
+        ds.file_meta = FileMetaDataset()
+        ds.file_meta.TransferSyntaxUID = ExplicitVRLittleEndian
+        ds.PixelRepresentation = 0
+        ds.BitsStored = 16
+        ds.VOILUTSequence = [Dataset()]
+        item = ds.VOILUTSequence[0]
+        item.LUTDescriptor = [4, 0, 16]
+        item.LUTData = [0, 127, 32768, 65535]
+        item.LUTData = pack("<4H", *item.LUTData)
+        item["LUTData"].VR = "OW"
+        arr = np.asarray([0, 1, 2, 3, 255], dtype="uint16")
+        out = apply_voi(arr, ds)
+        assert "uint16" == out.dtype
+        assert [0, 127, 32768, 65535, 65535] == out.tolist()
+
+    def test_no_endianness_raises(self):
+        """Test unable to determine endianness"""
+        ds = Dataset()
+        ds.PixelRepresentation = 0
+        ds.BitsStored = 16
+        ds.VOILUTSequence = [Dataset()]
+        item = ds.VOILUTSequence[0]
+        item.LUTDescriptor = [4, 0, 16]
+        item.LUTData = [0, 127, 32768, 65535]
+        item.LUTData = pack("<4H", *item.LUTData)
+        item["LUTData"].VR = "OW"
+        arr = np.asarray([0, 1, 2, 3, 255], dtype="uint16")
+        msg = (
+            "Unable to determine the endianness of the dataset, please set "
+            "an appropriate Transfer Syntax UID in 'Dataset.file_meta'"
+        )
+        with pytest.raises(AttributeError, match=msg):
+            apply_voi(arr, ds)
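+
+
+# Background for the apply_voi_lut() tests below: it is a convenience
+# wrapper that prefers the VOI LUT Sequence when present and falls back to
+# WindowCenter/WindowWidth; prefer_lut=False flips the preference, and an
+# empty element on the preferred side also triggers the fallback.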
+@pytest.mark.skipif(not HAVE_NP, reason="Numpy is not available")
+class TestApplyVOILUT:
+    """Tests for apply_voi_lut()"""
+
+    def test_unchanged(self):
+        """Test input array is unchanged if no VOI LUT"""
+        ds = Dataset()
+        ds.PhotometricInterpretation = "MONOCHROME1"
+        ds.PixelRepresentation = 1
+        ds.BitsStored = 8
+        arr = np.asarray([-128, -127, -1, 0, 1, 126, 127], dtype="int8")
+        out = apply_voi_lut(arr, ds)
+        assert [-128, -127, -1, 0, 1, 126, 127] == out.tolist()
+
+        ds.VOILUTSequence = []
+        out = apply_voi_lut(arr, ds)
+        assert [-128, -127, -1, 0, 1, 126, 127] == out.tolist()
+
+    def test_only_windowing(self):
+        """Test only windowing operation elements present."""
+        ds = Dataset()
+        ds.PhotometricInterpretation = "MONOCHROME1"
+        ds.PixelRepresentation = 0
+        ds.BitsStored = 8
+        arr = np.asarray([0, 1, 128, 254, 255], dtype="uint8")
+
+        ds.WindowWidth = 1
+        ds.WindowCenter = 0
+        assert [255, 255, 255, 255, 255] == apply_voi_lut(arr, ds).tolist()
+
+    def test_only_voi(self):
+        """Test only LUT operation elements present."""
+        ds = Dataset()
+        ds.PixelRepresentation = 0
+        ds.BitsStored = 8
+        ds.VOILUTSequence = [Dataset()]
+        item = ds.VOILUTSequence[0]
+        item.LUTDescriptor = [4, 0, 8]
+        item.LUTData = [0, 127, 128, 255]
+        arr = np.asarray([0, 1, 128, 254, 255], dtype="uint8")
+        out = apply_voi_lut(arr, ds)
+        assert "uint8" == out.dtype
+        assert [0, 127, 255, 255, 255] == out.tolist()
+
+    def test_voi_windowing(self):
+        """Test both LUT and windowing operation elements present."""
+        ds = Dataset()
+        ds.PhotometricInterpretation = "MONOCHROME1"
+        ds.PixelRepresentation = 0
+        ds.BitsStored = 8
+        ds.WindowWidth = 1
+        ds.WindowCenter = 0
+        ds.VOILUTSequence = [Dataset()]
+        item = ds.VOILUTSequence[0]
+        item.LUTDescriptor = [4, 0, 8]
+        item.LUTData = [0, 127, 128, 255]
+        arr = np.asarray([0, 1, 128, 254, 255], dtype="uint8")
+
+        # Defaults to LUT
+        out = apply_voi_lut(arr, ds)
+        assert [0, 127, 255, 255, 255] == out.tolist()
+
+        out = apply_voi_lut(arr, ds, prefer_lut=False)
+        assert [255, 255, 255, 255, 255] == out.tolist()
+
+    def test_voi_windowing_empty(self):
+        """Test empty VOI elements."""
+        ds = Dataset()
+        ds.PhotometricInterpretation = "MONOCHROME1"
+        ds.PixelRepresentation = 0
+        ds.BitsStored = 8
+        ds.WindowWidth = 1
+        ds.WindowCenter = 0
+        ds.VOILUTSequence = [Dataset()]
+        item = ds.VOILUTSequence[0]
+        item.LUTDescriptor = [4, 0, 8]
+        item.LUTData = [0, 127, 128, 255]
+        arr = np.asarray([0, 1, 128, 254, 255], dtype="uint8")
+
+        # Test empty VOI elements
+        item.LUTData = None
+        out = apply_voi_lut(arr, ds)
+        assert [255, 255, 255, 255, 255] == out.tolist()
+
+        # Test empty windowing elements
+        ds.WindowWidth = None
+        out = apply_voi_lut(arr, ds)
+        assert [0, 1, 128, 254, 255] == out.tolist()
diff --git a/tests/pixels/test_utils.py b/tests/pixels/test_utils.py
index 200878a825..ef1124df63 100644
--- a/tests/pixels/test_utils.py
+++ b/tests/pixels/test_utils.py
@@ -1,7 +1,13 @@
+# Copyright 2008-2024 pydicom authors. See LICENSE file for details.
+"""Tests for the pixels.utils module."""
+
 import importlib
 from io import BytesIO
 import logging
 import os
+import random
+from struct import pack
+from sys import byteorder
 
 import pytest
 
@@ -12,16 +18,29 @@
 except ImportError:
     HAVE_NP = False
 
-from pydicom import dcmread, Dataset
+from pydicom import dcmread, config
+from pydicom.dataset import Dataset, FileMetaDataset
 from pydicom.encaps import get_frame
-from pydicom.pixel_data_handlers.util import convert_color_space
-from pydicom.pixels import pixel_array, iter_pixels
+from pydicom.pixels import pixel_array, iter_pixels, convert_color_space
 from pydicom.pixels.utils import (
     as_pixel_options,
     _passes_version_check,
     _get_jpg_parameters,
+    reshape_pixel_array,
+    pixel_dtype,
+    get_expected_length,
+    get_j2k_parameters,
+    get_nr_frames,
+    pack_bits,
+    unpack_bits,
+    expand_ybr422,
+)
+from pydicom.uid import (
+    EnhancedMRImageStorage,
+    ExplicitVRLittleEndian,
+    ExplicitVRBigEndian,
+    UncompressedTransferSyntaxes,
 )
-from pydicom.uid import EnhancedMRImageStorage, ExplicitVRLittleEndian
 
 from .pixels_reference import (
     PIXEL_REFERENCE,
@@ -36,7 +55,10 @@
     JLSL_08_08_3_0_1F_ILV1,
     JLSL_08_08_3_0_1F_ILV2,
     JLSN_08_01_1_0_1F,
+    EXPL_1_1_3F,
 )
+from ..test_helpers import assert_no_warning
+
 
 HAVE_PYLJ = bool(importlib.util.find_spec("pylibjpeg"))
 HAVE_RLE = bool(importlib.util.find_spec("rle"))
@@ -466,3 +488,830 @@ def test_invalid(self):
         assert _get_jpg_parameters(b"\x00\x00") == {}
         data = get_frame(JLSN_08_01_1_0_1F.ds.PixelData, 0)
         assert _get_jpg_parameters(data[:20]) == {}
+
+
+REFERENCE_DTYPE = [
+    # BitsAllocated, PixelRepresentation, as_float, numpy dtype string
+    (1, 0, False, "uint8"),
+    (1, 1, False, "uint8"),
+    (8, 0, False, "uint8"),
+    (8, 1, False, "int8"),
+    (16, 0, False, "uint16"),
+    (16, 1, False, "int16"),
+    (32, 0, False, "uint32"),
+    (32, 1, False, "int32"),
+    (32, 0, True, "float32"),
+    (64, 0, True, "float64"),
+]
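+
+
+# Orientation for REFERENCE_DTYPE: pixel_dtype() maps Bits Allocated and
+# Pixel Representation (0 = unsigned, 1 = signed) to a NumPy dtype, e.g.
+# (16, 1) -> int16, with as_float=True selecting float32/float64 instead;
+# the parametrized tests below simply walk this table.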
+@pytest.mark.skipif(HAVE_NP, reason="Numpy is available")
+def test_pixel_dtype_raises():
+    """Test that pixel_dtype raises exception without numpy."""
+    with pytest.raises(ImportError, match="Numpy is required to determine the dtype"):
+        pixel_dtype(None)
+
+
+@pytest.mark.skipif(not HAVE_NP, reason="Numpy is not available")
+class TestPixelDtype:
+    """Tests for pixel_dtype()."""
+
+    def setup_method(self):
+        """Setup the test dataset."""
+        self.ds = Dataset()
+        self.ds.file_meta = FileMetaDataset()
+        self.ds.file_meta.TransferSyntaxUID = ExplicitVRLittleEndian
+
+    def test_unknown_pixel_representation_raises(self):
+        """Test an unknown PixelRepresentation value raises exception."""
+        self.ds.BitsAllocated = 16
+        with pytest.warns(UserWarning):
+            self.ds.PixelRepresentation = -1
+        # The bracket needs to be escaped
+        with pytest.raises(ValueError, match=r"value of '-1' for '\(0028,0103"):
+            pixel_dtype(self.ds)
+
+        self.ds.PixelRepresentation = 2
+        with pytest.raises(ValueError, match=r"value of '2' for '\(0028,0103"):
+            pixel_dtype(self.ds)
+
+    def test_unknown_bits_allocated_raises(self):
+        """Test an unknown BitsAllocated value raises exception."""
+        self.ds.BitsAllocated = 0
+        self.ds.PixelRepresentation = 0
+        # The bracket needs to be escaped
+        with pytest.raises(ValueError, match=r"value of '0' for '\(0028,0100"):
+            pixel_dtype(self.ds)
+
+        self.ds.BitsAllocated = 2
+        with pytest.raises(ValueError, match=r"value of '2' for '\(0028,0100"):
+            pixel_dtype(self.ds)
+
+        self.ds.BitsAllocated = 15
+        with pytest.raises(ValueError, match=r"value of '15' for '\(0028,0100"):
+            pixel_dtype(self.ds)
+
+    def test_unsupported_dtypes(self):
+        """Test unsupported dtypes raise exception."""
+        self.ds.BitsAllocated = 24
+        self.ds.PixelRepresentation = 0
+
+        with pytest.raises(
+            NotImplementedError, match="data type 'uint24' needed to contain"
+        ):
+            pixel_dtype(self.ds)
+
+    @pytest.mark.parametrize("bits, pixel_repr, as_float, dtype", REFERENCE_DTYPE)
+    def test_supported_dtypes(self, bits, pixel_repr, as_float, dtype):
+        """Test supported dtypes."""
+        self.ds.BitsAllocated = bits
+        self.ds.PixelRepresentation = pixel_repr
+        # Correct for endianness of system
+        ref_dtype = np.dtype(dtype)
+        endianness = self.ds.file_meta.TransferSyntaxUID.is_little_endian
+        if endianness != (byteorder == "little"):
+            ref_dtype = ref_dtype.newbyteorder("S")
+
+        assert ref_dtype == pixel_dtype(self.ds, as_float=as_float)
+
+    def test_byte_swapping(self):
+        """Test that the endianness of the system is taken into account."""
+        # The main problem is that our testing environments are probably
+        # all little endian, but we'll try our best
+        self.ds.BitsAllocated = 16
+        self.ds.PixelRepresentation = 0
+
+        # explicit little
+        meta = self.ds.file_meta
+
+        # < is little, = is native, > is big
+        if byteorder == "little":
+            self.ds._read_little = True
+            assert pixel_dtype(self.ds).byteorder in ["<", "="]
+            meta.TransferSyntaxUID = ExplicitVRBigEndian
+            self.ds._read_little = False
+            assert pixel_dtype(self.ds).byteorder == ">"
+        elif byteorder == "big":
+            self.ds._read_little = True
+            assert pixel_dtype(self.ds).byteorder == "<"
+            meta.TransferSyntaxUID = ExplicitVRBigEndian
+            self.ds._read_little = False
+            assert pixel_dtype(self.ds).byteorder in [">", "="]
+
+    def test_no_endianness_raises(self):
+        ds = Dataset()
+        ds.BitsAllocated = 8
+        ds.PixelRepresentation = 1
+        msg = (
+            "Unable to determine the endianness of the dataset, please set "
+            "an appropriate Transfer Syntax UID in 'Dataset.file_meta'"
+        )
+        with pytest.raises(AttributeError, match=msg):
+            pixel_dtype(ds)
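+
+
+# The reference arrays below encode the two DICOM planar configurations
+# (PS3.3, C.7.6.3.1.3): 0 is colour-by-pixel (R1 G1 B1 R2 G2 B2 ...) and 1
+# is colour-by-plane (all R, then all G, then all B); the _arr#frame_#sample
+# names give the frame count and samples per pixel being reshaped.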
+if HAVE_NP:
+    _arr1_1 = [1, 2, 3, 4, 5, 2, 3, 4, 5, 6, 3, 4, 5, 6, 7, 4, 5, 6, 7, 8]
+
+    _arr2_1 = _arr1_1[:]
+    _arr2_1.extend(
+        [25, 26, 27, 28, 29, 26, 27, 28, 29, 30, 27, 28, 29, 30, 31, 28, 29, 30, 31, 32]
+    )
+
+    _arr1_3_0 = [1, 9, 17, 2, 10, 18, 3, 11, 19, 4, 12, 20, 5, 13, 21, 2, 10, 18, 3, 11]
+    _arr1_3_0.extend(
+        [19, 4, 12, 20, 5, 13, 21, 6, 14, 22, 3, 11, 19, 4, 12, 20, 5, 13, 21, 6]
+    )
+    _arr1_3_0.extend(
+        [14, 22, 7, 15, 23, 4, 12, 20, 5, 13, 21, 6, 14, 22, 7, 15, 23, 8, 16, 24]
+    )
+
+    _arr1_3_1 = _arr1_1[:]
+    _arr1_3_1.extend(
+        [9, 10, 11, 12, 13, 10, 11, 12, 13, 14, 11, 12, 13, 14, 15, 12, 13, 14, 15, 16]
+    )
+    _arr1_3_1.extend(
+        [17, 18, 19, 20, 21, 18, 19, 20, 21, 22, 19, 20, 21, 22, 23, 20, 21, 22, 23, 24]
+    )
+
+    _arr2_3_0 = _arr1_3_0[:]
+    _arr2_3_0.extend(
+        [25, 33, 41, 26, 34, 42, 27, 35, 43, 28, 36, 44, 29, 37, 45, 26, 34, 42, 27, 35]
+    )
+    _arr2_3_0.extend(
+        [43, 28, 36, 44, 29, 37, 45, 30, 38, 46, 27, 35, 43, 28, 36, 44, 29, 37, 45, 30]
+    )
+    _arr2_3_0.extend(
+        [38, 46, 31, 39, 47, 28, 36, 44, 29, 37, 45, 30, 38, 46, 31, 39, 47, 32, 40, 48]
+    )
+
+    _arr2_3_1 = _arr1_3_1[:]
+    _arr2_3_1.extend(
+        [25, 26, 27, 28, 29, 26, 27, 28, 29, 30, 27, 28, 29, 30, 31, 28, 29, 30, 31, 32]
+    )
+    _arr2_3_1.extend(
+        [33, 34, 35, 36, 37, 34, 35, 36, 37, 38, 35, 36, 37, 38, 39, 36, 37, 38, 39, 40]
+    )
+    _arr2_3_1.extend(
+        [41, 42, 43, 44, 45, 42, 43, 44, 45, 46, 43, 44, 45, 46, 47, 44, 45, 46, 47, 48]
+    )
+
+    RESHAPE_ARRAYS = {
+        "reference": np.asarray(
+            [
+                [  # Frame 1
+                    [[1, 9, 17], [2, 10, 18], [3, 11, 19], [4, 12, 20], [5, 13, 21]],
+                    [[2, 10, 18], [3, 11, 19], [4, 12, 20], [5, 13, 21], [6, 14, 22]],
+                    [[3, 11, 19], [4, 12, 20], [5, 13, 21], [6, 14, 22], [7, 15, 23]],
+                    [[4, 12, 20], [5, 13, 21], [6, 14, 22], [7, 15, 23], [8, 16, 24]],
+                ],
+                [  # Frame 2
+                    [[25, 33, 41], [26, 34, 42], [27, 35, 43], [28, 36, 44], [29, 37, 45]],
+                    [[26, 34, 42], [27, 35, 43], [28, 36, 44], [29, 37, 45], [30, 38, 46]],
+                    [[27, 35, 43], [28, 36, 44], [29, 37, 45], [30, 38, 46], [31, 39, 47]],
+                    [[28, 36, 44], [29, 37, 45], [30, 38, 46], [31, 39, 47], [32, 40, 48]],
+                ],
+            ]
+        ),
+        "1frame_1sample": np.asarray(_arr1_1),
+        "2frame_1sample": np.asarray(_arr2_1),
+        "1frame_3sample_0config": np.asarray(_arr1_3_0),
+        "1frame_3sample_1config": np.asarray(_arr1_3_1),
+        "2frame_3sample_0config": np.asarray(_arr2_3_0),
+        "2frame_3sample_1config": np.asarray(_arr2_3_1),
+    }
+
+
+@pytest.mark.skipif(HAVE_NP, reason="Numpy is available")
+def test_reshape_pixel_array_raises():
+    """Test that reshape_pixel_array raises exception without numpy."""
+    with pytest.raises(ImportError, match="Numpy is required to reshape"):
+        reshape_pixel_array(None, None)
+
+
+@pytest.mark.skipif(not HAVE_NP, reason="Numpy is not available")
+class TestReshapePixelArray:
+    """Tests for reshape_pixel_array()."""
+
+    def setup_method(self):
+        """Setup the test dataset."""
+        self.ds = Dataset()
+        self.ds.file_meta = FileMetaDataset()
+        self.ds.file_meta.TransferSyntaxUID = ExplicitVRLittleEndian
+        self.ds.Rows = 4
+        self.ds.Columns = 5
+
+        # Expected output ref_#frames_#samples
+        self.ref_1_1 = RESHAPE_ARRAYS["reference"][0, :, :, 0]
+        self.ref_1_3 = RESHAPE_ARRAYS["reference"][0]
+        self.ref_2_1 = RESHAPE_ARRAYS["reference"][:, :, :, 0]
+        self.ref_2_3 = RESHAPE_ARRAYS["reference"]
+
+    def test_reference_1frame_1sample(self):
+        """Test the 1 frame 1 sample/pixel reference array is as expected."""
+        # (rows, columns)
+        assert (4, 5) == self.ref_1_1.shape
+        assert np.array_equal(
+            self.ref_1_1,
+            np.asarray(
+                [[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7], [4, 5, 6, 7, 8]]
+            ),
+        )
+
+    def test_reference_1frame_3sample(self):
+        """Test the 1 frame 3 sample/pixel reference array is as expected."""
+        # (rows, columns, planes)
+        assert (4, 5, 3) == self.ref_1_3.shape
+
+        # Red channel
+        assert np.array_equal(
+            self.ref_1_3[:, :, 0],
+            np.asarray(
+                [[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7], [4, 5, 6, 7, 8]]
+            ),
+        )
+        # Green channel
+        assert np.array_equal(
+            self.ref_1_3[:, :, 1],
+            np.asarray(
+                [
+                    [9, 10, 11, 12, 13],
+                    [10, 11, 12, 13, 14],
+                    [11, 12, 13, 14, 15],
+                    [12, 13, 14, 15, 16],
+                ]
+            ),
+        )
+        # Blue channel
+        assert np.array_equal(
+            self.ref_1_3[:, :, 2],
+            np.asarray(
+                [
+                    [17, 18, 19, 20, 21],
+                    [18, 19, 20, 21, 22],
+                    [19, 20, 21, 22, 23],
+                    [20, 21, 22, 23, 24],
+                ]
+            ),
+        )
+
+    def test_reference_2frame_1sample(self):
+        """Test the 2 frame 1 sample/pixel reference array is as expected."""
+        # (nr frames, rows, columns)
+        assert (2, 4, 5) == self.ref_2_1.shape
+
+        # Frame 1
+        assert np.array_equal(
+            self.ref_2_1[0, :, :],
+            np.asarray(
+                [[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7], [4, 5, 6, 7, 8]]
+            ),
+        )
+        # Frame 2
+        assert np.array_equal(
+            self.ref_2_1[1, :, :],
+            np.asarray(
+                [
+                    [25, 26, 27, 28, 29],
+                    [26, 27, 28, 29, 30],
+                    [27, 28, 29, 30, 31],
+                    [28, 29, 30, 31, 32],
+                ]
+            ),
+        )
+
+    def test_reference_2frame_3sample(self):
+        """Test the 2 frame 3 sample/pixel reference array is as expected."""
+        # (nr frames, row, columns, planes)
+        assert (2, 4, 5, 3) == self.ref_2_3.shape
+
+        # Red channel, frame 1
+        assert np.array_equal(
+            self.ref_2_3[0, :, :, 0],
+            np.asarray(
+                [[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7], [4, 5, 6, 7, 8]]
+            ),
+        )
+        # Green channel, frame 2
+        assert np.array_equal(
+            self.ref_2_3[1, :, :, 1],
+            np.asarray(
+                [
+                    [33, 34, 35, 36, 37],
+                    [34, 35, 36, 37, 38],
+                    [35, 36, 37, 38, 39],
+                    [36, 37, 38, 39, 40],
+                ]
+            ),
+        )
+
+    def test_1frame_1sample(self):
+        """Test reshaping 1 frame, 1 sample/pixel."""
+        self.ds.SamplesPerPixel = 1
+        arr = reshape_pixel_array(self.ds, RESHAPE_ARRAYS["1frame_1sample"])
+        assert (4, 5) == arr.shape
+        assert np.array_equal(arr, self.ref_1_1)
+
+    def test_1frame_3sample_0conf(self):
+        """Test reshaping 1 frame, 3 sample/pixel for 0 planar config."""
+        self.ds.NumberOfFrames = 1
+        self.ds.SamplesPerPixel = 3
+        self.ds.PlanarConfiguration = 0
+        arr = reshape_pixel_array(self.ds, RESHAPE_ARRAYS["1frame_3sample_0config"])
+        assert (4, 5, 3) == arr.shape
+        assert np.array_equal(arr, self.ref_1_3)
+
+    def test_1frame_3sample_1conf(self):
+        """Test reshaping 1 frame, 3 sample/pixel for 1 planar config."""
+        self.ds.NumberOfFrames = 1
+        self.ds.SamplesPerPixel = 3
+        self.ds.PlanarConfiguration = 1
+        arr = reshape_pixel_array(self.ds, RESHAPE_ARRAYS["1frame_3sample_1config"])
+        assert (4, 5, 3) == arr.shape
+        assert np.array_equal(arr, self.ref_1_3)
+
+    def test_2frame_1sample(self):
+        """Test reshaping 2 frame, 1 sample/pixel."""
+        self.ds.NumberOfFrames = 2
+        self.ds.SamplesPerPixel = 1
+        arr = reshape_pixel_array(self.ds, RESHAPE_ARRAYS["2frame_1sample"])
+        assert (2, 4, 5) == arr.shape
+        assert np.array_equal(arr, self.ref_2_1)
+
+    def test_2frame_3sample_0conf(self):
+        """Test reshaping 2 frame, 3 sample/pixel for 0 planar config."""
+        self.ds.NumberOfFrames = 2
+        self.ds.SamplesPerPixel = 3
+        self.ds.PlanarConfiguration = 0
+        arr = reshape_pixel_array(self.ds, RESHAPE_ARRAYS["2frame_3sample_0config"])
+        assert (2, 4, 5, 3) == arr.shape
+        assert np.array_equal(arr, self.ref_2_3)
+
+    def test_2frame_3sample_1conf(self):
+        """Test reshaping 2 frame, 3 sample/pixel for 1 planar config."""
+        self.ds.NumberOfFrames = 2
+        self.ds.SamplesPerPixel = 3
+        self.ds.PlanarConfiguration = 1
+        arr = reshape_pixel_array(self.ds, RESHAPE_ARRAYS["2frame_3sample_1config"])
+        assert (2, 4, 5, 3) == arr.shape
+        assert np.array_equal(arr, self.ref_2_3)
+
+    def test_compressed_syntaxes_0conf(self):
+        """Test the compressed syntaxes that are always 0 planar conf."""
+        for uid in [
+            "1.2.840.10008.1.2.4.50",
+            "1.2.840.10008.1.2.4.57",
+            "1.2.840.10008.1.2.4.70",
+            "1.2.840.10008.1.2.4.90",
+            "1.2.840.10008.1.2.4.91",
+        ]:
+            self.ds.file_meta.TransferSyntaxUID = uid
+            self.ds.PlanarConfiguration = 1
+            self.ds.NumberOfFrames = 1
+            self.ds.SamplesPerPixel = 3
+
+            arr = reshape_pixel_array(self.ds, RESHAPE_ARRAYS["1frame_3sample_0config"])
+            assert (4, 5, 3) == arr.shape
+            assert np.array_equal(arr, self.ref_1_3)
+
+    def test_compressed_syntaxes_1conf(self):
+        """Test the compressed syntaxes that are always 1 planar conf."""
+        for uid in ["1.2.840.10008.1.2.5"]:
+            self.ds.file_meta.TransferSyntaxUID = uid
+            self.ds.PlanarConfiguration = 0
+            self.ds.NumberOfFrames = 1
+            self.ds.SamplesPerPixel = 3
+
+            arr = reshape_pixel_array(self.ds, RESHAPE_ARRAYS["1frame_3sample_1config"])
+            assert (4, 5, 3) == arr.shape
+            assert np.array_equal(arr, self.ref_1_3)
+
+    def test_uncompressed_syntaxes(self):
+        """Test that uncompressed syntaxes use the dataset planar conf."""
+        for uid in UncompressedTransferSyntaxes:
+            self.ds.file_meta.TransferSyntaxUID = uid
+            self.ds.PlanarConfiguration = 0
+            self.ds.NumberOfFrames = 1
+            self.ds.SamplesPerPixel = 3
+
+            arr = reshape_pixel_array(self.ds, RESHAPE_ARRAYS["1frame_3sample_0config"])
+            assert (4, 5, 3) == arr.shape
+            assert np.array_equal(arr, self.ref_1_3)
+
+            self.ds.PlanarConfiguration = 1
+            arr = reshape_pixel_array(self.ds, RESHAPE_ARRAYS["1frame_3sample_1config"])
+            assert (4, 5, 3) == arr.shape
+            assert np.array_equal(arr, self.ref_1_3)
+
+    def test_invalid_nr_frames_warns(self):
+        """Test an invalid Number of Frames value shows a warning."""
+        self.ds.SamplesPerPixel = 1
+        self.ds.NumberOfFrames = 0
+        # Need to escape brackets
+        with pytest.warns(UserWarning, match=r"value of 0 for \(0028,0008\)"):
+            reshape_pixel_array(self.ds, RESHAPE_ARRAYS["1frame_1sample"])
+
+    def test_invalid_samples_raises(self):
+        """Test an invalid Samples per Pixel value raises exception."""
+        self.ds.SamplesPerPixel = 0
+        # Need to escape brackets
+        with pytest.raises(ValueError, match=r"value of 0 for \(0028,0002\)"):
+            reshape_pixel_array(self.ds, RESHAPE_ARRAYS["1frame_1sample"])
+
+    def test_invalid_planar_conf_raises(self):
+        self.ds.SamplesPerPixel = 3
+        self.ds.PlanarConfiguration = 2
+        # Need to escape brackets
+        with pytest.raises(ValueError, match=r"value of 2 for \(0028,0006\)"):
+            reshape_pixel_array(self.ds, RESHAPE_ARRAYS["1frame_3sample_0config"])
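+
+
+# Sanity check for REFERENCE_LENGTH below: 1-bit data is bit-packed, so the
+# byte count is the pixel count divided by 8, rounded up (e.g. 3 frames of
+# 3 x 3 x 1 is 27 pixels -> 4 bytes), while YBR_FULL_422's 4:2:2 chroma
+# subsampling stores two pixels in four samples, i.e. two thirds of the
+# equivalent RGB byte count.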
+REFERENCE_LENGTH = [
+    # (frames, rows, cols, samples), bit depth,
+    # result in (bytes, pixels, ybr_bytes)
+    # YBR can only be 3 samples/px and > 1 bit depth
+    # No 'NumberOfFrames' in dataset
+    ((0, 0, 0, 0), 1, (0, 0, None)),
+    ((0, 1, 1, 1), 1, (1, 1, None)),  # 1 bit -> 1 byte
+    ((0, 1, 1, 3), 1, (1, 3, None)),  # 3 bits -> 1 byte
+    ((0, 1, 3, 3), 1, (2, 9, None)),  # 9 bits -> 2 bytes
+    ((0, 2, 2, 1), 1, (1, 4, None)),  # 4 bits -> 1 byte
+    ((0, 2, 4, 1), 1, (1, 8, None)),  # 8 bits -> 1 byte
+    ((0, 3, 3, 1), 1, (2, 9, None)),  # 9 bits -> 2 bytes
+    ((0, 512, 512, 1), 1, (32768, 262144, None)),  # Typical length
+    ((0, 512, 512, 3), 1, (98304, 786432, None)),
+    ((0, 0, 0, 0), 8, (0, 0, None)),
+    ((0, 1, 1, 1), 8, (1, 1, None)),  # Odd length
+    ((0, 9, 1, 1), 8, (9, 9, None)),  # Odd length
+    ((0, 1, 2, 1), 8, (2, 2, None)),  # Even length
+    ((0, 512, 512, 1), 8, (262144, 262144, None)),
+    ((0, 512, 512, 3), 8, (786432, 786432, 524288)),
+    ((0, 0, 0, 0), 16, (0, 0, None)),
+    ((0, 1, 1, 1), 16, (2, 1, None)),  # 16 bit data can't be odd length
+    ((0, 1, 2, 1), 16, (4, 2, None)),
+    ((0, 512, 512, 1), 16, (524288, 262144, None)),
+    ((0, 512, 512, 3), 16, (1572864, 786432, 1048576)),
+    ((0, 0, 0, 0), 32, (0, 0, None)),
+    ((0, 1, 1, 1), 32, (4, 1, None)),  # 32 bit data can't be odd length
+    ((0, 1, 2, 1), 32, (8, 2, None)),
+    ((0, 512, 512, 1), 32, (1048576, 262144, None)),
+    ((0, 512, 512, 3), 32, (3145728, 786432, 2097152)),
+    # NumberOfFrames odd
+    ((3, 0, 0, 0), 1, (0, 0, None)),
+    ((3, 1, 1, 1), 1, (1, 3, None)),
+    ((3, 1, 1, 3), 1, (2, 9, None)),
+    ((3, 1, 3, 3), 1, (4, 27, None)),
+    ((3, 2, 4, 1), 1, (3, 24, None)),
+    ((3, 2, 2, 1), 1, (2, 12, None)),
+    ((3, 3, 3, 1), 1, (4, 27, None)),
+    ((3, 512, 512, 1), 1, (98304, 786432, None)),
+    ((3, 512, 512, 3), 1, (294912, 2359296, 196608)),
+    ((3, 0, 0, 0), 8, (0, 0, None)),
+    ((3, 1, 1, 1), 8, (3, 3, None)),
+    ((3, 9, 1, 1), 8, (27, 27, None)),
+    ((3, 1, 2, 1), 8, (6, 6, None)),
+    ((3, 512, 512, 1), 8, (786432, 786432, None)),
+    ((3, 512, 512, 3), 8, (2359296, 2359296, 1572864)),
+    ((3, 0, 0, 0), 16, (0, 0, None)),
+    ((3, 512, 512, 1), 16, (1572864, 786432, None)),
+    ((3, 512, 512, 3), 16, (4718592, 2359296, 3145728)),
+    ((3, 0, 0, 0), 32, (0, 0, None)),
+    ((3, 512, 512, 1), 32, (3145728, 786432, None)),
+    ((3, 512, 512, 3), 32, (9437184, 2359296, 6291456)),
+    # NumberOfFrames even
+    ((4, 0, 0, 0), 1, (0, 0, None)),
+    ((4, 1, 1, 1), 1, (1, 4, None)),
+    ((4, 1, 1, 3), 1, (2, 12, None)),
+    ((4, 1, 3, 3), 1, (5, 36, None)),
+    ((4, 2, 4, 1), 1, (4, 32, None)),
+    ((4, 2, 2, 1), 1, (2, 16, None)),
+    ((4, 3, 3, 1), 1, (5, 36, None)),
+    ((4, 512, 512, 1), 1, (131072, 1048576, None)),
+    ((4, 512, 512, 3), 1, (393216, 3145728, 262144)),
+    ((4, 0, 0, 0), 8, (0, 0, None)),
+    ((4, 512, 512, 1), 8, (1048576, 1048576, None)),
+    ((4, 512, 512, 3), 8, (3145728, 3145728, 2097152)),
+    ((4, 0, 0, 0), 16, (0, 0, None)),
+    ((4, 512, 512, 1), 16, (2097152, 1048576, None)),
+    ((4, 512, 512, 3), 16, (6291456, 3145728, 4194304)),
+    ((4, 0, 0, 0), 32, (0, 0, None)),
+    ((4, 512, 512, 1), 32, (4194304, 1048576, None)),
+    ((4, 512, 512, 3), 32, (12582912, 3145728, 8388608)),
+]
+
+
+class TestGetExpectedLength:
+    """Tests for get_expected_length()."""
+
+    @pytest.mark.parametrize("shape, bits, length", REFERENCE_LENGTH)
+    def test_length_in_bytes(self, shape, bits, length):
+        """Test get_expected_length(ds, unit='bytes')."""
+        ds = Dataset()
+        ds.PhotometricInterpretation = "MONOCHROME2"
+        ds.Rows = shape[1]
+        ds.Columns = shape[2]
+        ds.BitsAllocated = bits
+        if shape[0] != 0:
+            ds.NumberOfFrames = shape[0]
+        ds.SamplesPerPixel = shape[3]
+
+        assert length[0] == get_expected_length(ds, unit="bytes")
+
+    @pytest.mark.parametrize("shape, bits, length", REFERENCE_LENGTH)
+    def test_length_in_pixels(self, shape, bits, length):
+        """Test get_expected_length(ds, unit='pixels')."""
+        ds = Dataset()
+        ds.PhotometricInterpretation = "MONOCHROME2"
+        ds.Rows = shape[1]
+        ds.Columns = shape[2]
+        ds.BitsAllocated = bits
+        if shape[0] != 0:
+            ds.NumberOfFrames = shape[0]
+        ds.SamplesPerPixel = shape[3]
+
+        assert length[1] == get_expected_length(ds, unit="pixels")
+
+    @pytest.mark.parametrize("shape, bits, length", REFERENCE_LENGTH)
+    def test_length_ybr_422(self, shape, bits, length):
+        """Test get_expected_length for YBR_FULL_422."""
+        if shape[3] != 3 or bits == 1:
+            return
+
+        ds = Dataset()
+        ds.PhotometricInterpretation = "YBR_FULL_422"
+        ds.Rows = shape[1]
+        ds.Columns = shape[2]
+        ds.BitsAllocated = bits
+        if shape[0] != 0:
+            ds.NumberOfFrames = shape[0]
+        ds.SamplesPerPixel = shape[3]
+
+        assert length[2] == get_expected_length(ds, unit="bytes")
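+
+
+# Background for the tests below: get_j2k_parameters() reads the Ssiz byte
+# from the SIZ segment (marker 0xFF51) of a JPEG 2000 codestream, where bit
+# 7 flags a signed sample and the low bits hold precision - 1, hence the
+# signed 135..143 values mapping to precisions 8..16 below.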
TestGetJ2KParameters: + """Tests for get_j2k_parameters().""" + + def test_precision(self): + """Test getting the precision for a JPEG2K bytestream.""" + base = b"\xff\x4f\xff\x51" + b"\x00" * 38 + # Signed + for ii in range(135, 144): + params = get_j2k_parameters(base + bytes([ii])) + assert ii - 127 == params["precision"] + assert params["is_signed"] + + # Unsigned + for ii in range(7, 16): + params = get_j2k_parameters(base + bytes([ii])) + assert ii + 1 == params["precision"] + assert not params["is_signed"] + + def test_not_j2k(self): + """Test result when no JPEG2K SOF marker present""" + base = b"\xff\x4e\xff\x51" + b"\x00" * 38 + assert {} == get_j2k_parameters(base + b"\x8F") + + def test_no_siz(self): + """Test result when no SIZ box present""" + base = b"\xff\x4f\xff\x52" + b"\x00" * 38 + assert {} == get_j2k_parameters(base + b"\x8F") + + def test_short_bytestream(self): + """Test result when the bytestream is too short""" + assert {} == get_j2k_parameters(b"") + assert {} == get_j2k_parameters(b"\xff\x4f\xff\x51" + b"\x00" * 20) + + +class TestGetNrFrames: + """Tests for get_nr_frames().""" + + def test_none(self): + """Test warning when (0028,0008) 'Number of Frames' has a value of + None""" + ds = Dataset() + ds.NumberOfFrames = None + msg = ( + r"A value of None for \(0028,0008\) 'Number of Frames' is " + r"non-conformant. It's recommended that this value be " + r"changed to 1" + ) + with pytest.warns(UserWarning, match=msg): + assert 1 == get_nr_frames(ds) + + def test_zero(self): + """Test warning when (0028,0008) 'Number of Frames' has a value of 0""" + ds = Dataset() + ds.NumberOfFrames = 0 + msg = ( + r"A value of 0 for \(0028,0008\) 'Number of Frames' is " + r"non-conformant. It's recommended that this value be " + r"changed to 1" + ) + with pytest.warns(UserWarning, match=msg): + assert 1 == get_nr_frames(ds) + + def test_missing(self): + """Test return value when (0028,0008) 'Number of Frames' does not + exist""" + ds = Dataset() + with assert_no_warning(): + assert 1 == get_nr_frames(ds) + + def test_existing(self): + """Test return value when (0028,0008) 'Number of Frames' exists.""" + ds = Dataset() + ds.NumberOfFrames = random.randint(1, 10) + with assert_no_warning(): + assert ds.NumberOfFrames == get_nr_frames(ds) + + +REFERENCE_PACK_UNPACK = [ + (b"", []), + (b"\x00", [0, 0, 0, 0, 0, 0, 0, 0]), + (b"\x01", [1, 0, 0, 0, 0, 0, 0, 0]), + (b"\x02", [0, 1, 0, 0, 0, 0, 0, 0]), + (b"\x04", [0, 0, 1, 0, 0, 0, 0, 0]), + (b"\x08", [0, 0, 0, 1, 0, 0, 0, 0]), + (b"\x10", [0, 0, 0, 0, 1, 0, 0, 0]), + (b"\x20", [0, 0, 0, 0, 0, 1, 0, 0]), + (b"\x40", [0, 0, 0, 0, 0, 0, 1, 0]), + (b"\x80", [0, 0, 0, 0, 0, 0, 0, 1]), + (b"\xAA", [0, 1, 0, 1, 0, 1, 0, 1]), + (b"\xF0", [0, 0, 0, 0, 1, 1, 1, 1]), + (b"\x0F", [1, 1, 1, 1, 0, 0, 0, 0]), + (b"\xFF", [1, 1, 1, 1, 1, 1, 1, 1]), + # | 1st byte | 2nd byte + (b"\x00\x00", [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), + (b"\x00\x01", [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]), + (b"\x00\x80", [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), + (b"\x00\xFF", [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]), + (b"\x01\x80", [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), + (b"\x80\x80", [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1]), + (b"\xFF\x80", [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1]), +] + + +class TestUnpackBits: + """Tests for unpack_bits().""" + + @pytest.mark.skipif(not HAVE_NP, reason="Numpy is not available") + @pytest.mark.parametrize("src, output", REFERENCE_PACK_UNPACK) + def test_unpack_np(self, src, 
output): + """Test unpacking data using numpy.""" + assert np.array_equal(unpack_bits(src, as_array=True), np.asarray(output)) + + as_bytes = pack(f"{len(output)}B", *output) + assert unpack_bits(src, as_array=False) == as_bytes + + @pytest.mark.skipif(HAVE_NP, reason="Numpy is available") + @pytest.mark.parametrize("src, output", REFERENCE_PACK_UNPACK) + def test_unpack_bytes(self, src, output): + """Test unpacking data without numpy.""" + as_bytes = pack(f"{len(output)}B", *output) + assert unpack_bits(src, as_array=False) == as_bytes + + msg = r"unpack_bits\(\) requires NumPy if 'as_array = True'" + with pytest.raises(ValueError, match=msg): + unpack_bits(src, as_array=True) + + +REFERENCE_PACK_PARTIAL = [ + # | 1st byte | 2nd byte + (b"\x00\x40", [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), # 15-bits + (b"\x00\x20", [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), + (b"\x00\x10", [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), + (b"\x00\x08", [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), + (b"\x00\x04", [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), + (b"\x00\x02", [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), + (b"\x00\x01", [0, 0, 0, 0, 0, 0, 0, 0, 1]), # 9-bits + (b"\x80", [0, 0, 0, 0, 0, 0, 0, 1]), # 8-bits + (b"\x40", [0, 0, 0, 0, 0, 0, 1]), + (b"\x20", [0, 0, 0, 0, 0, 1]), + (b"\x10", [0, 0, 0, 0, 1]), + (b"\x08", [0, 0, 0, 1]), + (b"\x04", [0, 0, 1]), + (b"\x02", [0, 1]), + (b"\x01", [1]), + (b"", []), +] + + +@pytest.mark.skipif(not HAVE_NP, reason="Numpy is not available") +class TestPackBits: + """Tests for pack_bits().""" + + @pytest.mark.parametrize("output, input", REFERENCE_PACK_UNPACK) + def test_pack(self, input, output): + """Test packing data.""" + assert output == pack_bits(np.asarray(input), pad=False) + + def test_non_binary_input(self): + """Test non-binary input raises exception.""" + with pytest.raises( + ValueError, match=r"Only binary arrays \(containing ones or" + ): + pack_bits(np.asarray([0, 0, 2, 0, 0, 0, 0, 0])) + + def test_ndarray_input(self): + """Test non 1D input gets ravelled.""" + arr = np.asarray( + [ + [0, 0, 0, 0, 0, 0, 0, 0], + [1, 0, 1, 0, 1, 0, 1, 0], + [1, 1, 1, 1, 1, 1, 1, 1], + ] + ) + assert (3, 8) == arr.shape + b = pack_bits(arr, pad=False) + assert b"\x00\x55\xff" == b + + def test_padding(self): + """Test odd length packed data is padded.""" + arr = np.asarray( + [ + [0, 0, 0, 0, 0, 0, 0, 0], + [1, 0, 1, 0, 1, 0, 1, 0], + [1, 1, 1, 1, 1, 1, 1, 1], + ] + ) + assert 3 == len(pack_bits(arr, pad=False)) + b = pack_bits(arr, pad=True) + assert 4 == len(b) + assert 0 == b[-1] + + @pytest.mark.parametrize("output, input", REFERENCE_PACK_PARTIAL) + def test_pack_partial(self, input, output): + """Test packing data that isn't a full byte long.""" + assert output == pack_bits(np.asarray(input), pad=False) + + def test_functional(self): + """Test against a real dataset.""" + ds = EXPL_1_1_3F.ds + arr = ds.pixel_array + arr = arr.ravel() + assert ds.PixelData == pack_bits(arr) + + +@pytest.mark.skipif(not HAVE_NP, reason="Numpy is not available") +class TestExpandYBR422: + """Tests for expand_ybr422().""" + + def test_8bit(self): + """Test 8-bit expansion.""" + ds = EXPL_8_3_1F_YBR422.ds + assert ds.PhotometricInterpretation == "YBR_FULL_422" + ref = ds.pixel_array + + expanded = expand_ybr422(ds.PixelData, ds.BitsAllocated) + arr = np.frombuffer(expanded, dtype="u1") + assert np.array_equal(arr, ref.ravel()) + + def test_16bit(self): + """Test 16-bit expansion.""" + # Have to make our own 16-bit data + ds = EXPL_8_3_1F_YBR422.ds + ref = ds.pixel_array.astype("float32") + ref *= 
65535 / 255 + ref = ref.astype("u2") + # Subsample + # YY BB RR YY BB RR YY BB RR YY BB RR -> YY YY BB RR YY YY BB RR + src = bytearray(ref.tobytes()) + del src[2::12] + del src[2::11] + del src[2::10] + del src[2::9] + + # Should be 2/3rds of the original number of bytes + nr_bytes = ds.Rows * ds.Columns * ds.SamplesPerPixel * 2 + assert len(src) == nr_bytes * 2 // 3 + arr = np.frombuffer(expand_ybr422(src, 16), "u2") + assert np.array_equal(arr, ref.ravel()) + # Spot check values + arr = arr.reshape(100, 100, 3) + assert (19532, 21845, 65535) == tuple(arr[5, 50, :]) + assert (42662, 27242, 49601) == tuple(arr[15, 50, :]) diff --git a/tests/test_dataset.py b/tests/test_dataset.py index 13210f660d..a93abadcf8 100644 --- a/tests/test_dataset.py +++ b/tests/test_dataset.py @@ -27,7 +27,7 @@ from pydicom.dataset import Dataset, FileDataset, validate_file_meta, FileMetaDataset from pydicom.encaps import encapsulate from pydicom.filebase import DicomBytesIO -from pydicom.pixel_data_handlers.util import get_image_pixel_ids +from pydicom.pixels.utils import get_image_pixel_ids from pydicom.sequence import Sequence from pydicom.tag import Tag from pydicom.uid import ( diff --git a/tests/test_gdcm_pixel_data.py b/tests/test_gdcm_pixel_data.py index 2b4cbe4499..012e650d61 100644 --- a/tests/test_gdcm_pixel_data.py +++ b/tests/test_gdcm_pixel_data.py @@ -21,10 +21,8 @@ from pydicom.data import get_testdata_file from pydicom.encaps import get_frame from pydicom.pixel_data_handlers import numpy_handler, gdcm_handler -from pydicom.pixel_data_handlers.util import ( - _convert_YBR_FULL_to_RGB, - get_j2k_parameters, -) +from pydicom.pixels.processing import _convert_YBR_FULL_to_RGB +from pydicom.pixels.utils import get_j2k_parameters from pydicom.tag import Tag try: diff --git a/tests/test_handler_util.py b/tests/test_handler_util.py index ca906a337d..077ac55e5f 100644 --- a/tests/test_handler_util.py +++ b/tests/test_handler_util.py @@ -1,13 +1,9 @@ # Copyright 2008-2018 pydicom authors. See LICENSE file for details. 
"""Tests for the pixel_data_handlers.util module.""" -import os -import random -from struct import unpack, pack from sys import byteorder import pytest -from .test_helpers import assert_no_warning try: import numpy as np @@ -16,1045 +12,10 @@ except ImportError: HAVE_NP = False -from pydicom import dcmread, config -from pydicom.data import get_testdata_file, get_palette_files -from pydicom.dataset import Dataset, FileMetaDataset -from pydicom.pixel_data_handlers.util import ( - dtype_corrected_for_endianness, - reshape_pixel_array, - convert_color_space, - pixel_dtype, - get_expected_length, - apply_color_lut, - _expand_segmented_lut, - apply_modality_lut, - apply_voi_lut, - get_j2k_parameters, - get_nr_frames, - apply_voi, - apply_windowing, - pack_bits, - unpack_bits, - expand_ybr422, -) -from pydicom.uid import ( - ExplicitVRLittleEndian, - ExplicitVRBigEndian, - ImplicitVRLittleEndian, - UncompressedTransferSyntaxes, -) +from pydicom import config - -# PAL: PALETTE COLOR Photometric Interpretation -# SEG: Segmented Palette Color -# SUP: Supplemental Palette Color -# LE, BE: little endian, big endian encoding -# 8/8, 1 sample/pixel, 1 frame -PAL_08_256_0_16_1F = get_testdata_file("OBXXXX1A.dcm") -PAL_08_200_0_16_1F = get_testdata_file("OT-PAL-8-face.dcm") -# 8/8, 1 sample/pixel, 2 frame -PAL_08_256_0_16_2F = get_testdata_file("OBXXXX1A_2frame.dcm") -# PALETTE COLOR with 16-bit LUTs (no indirect segments) -PAL_SEG_LE_16_1F = get_testdata_file("gdcm-US-ALOKA-16.dcm") -PAL_SEG_BE_16_1F = get_testdata_file("gdcm-US-ALOKA-16_big.dcm") -# Supplemental palette colour + VOI windowing -SUP_16_16_2F = get_testdata_file("eCT_Supplemental.dcm") -# 8 bit, 3 samples/pixel, 1 and 2 frame datasets -# RGB colorspace, uncompressed -RGB_8_3_1F = get_testdata_file("SC_rgb.dcm") -RGB_8_3_2F = get_testdata_file("SC_rgb_2frame.dcm") -# MOD: Modality LUT -# SEQ: Modality LUT Sequence -MOD_16 = get_testdata_file("CT_small.dcm") -MOD_16_SEQ = get_testdata_file("mlut_18.dcm") -# VOI: VOI LUT Sequence -# WIN: Windowing operation -WIN_12_1F = get_testdata_file("MR-SIEMENS-DICOM-WithOverlays.dcm") -VOI_08_1F = get_testdata_file("vlut_04.dcm") -# 1/1, 1 sample/pixel, 3 frame -EXPL_1_1_3F = get_testdata_file("liver.dcm") -# Uncompressed YBR_FULL_422 -EXPL_8_3_1F_YBR422 = get_testdata_file("SC_ybr_full_422_uncompressed.dcm") - - -# Tests with Numpy unavailable -@pytest.mark.skipif(HAVE_NP, reason="Numpy is available") -class TestNoNumpy: - """Tests for the util functions without numpy.""" - - def test_pixel_dtype_raises(self): - """Test that pixel_dtype raises exception without numpy.""" - with pytest.raises( - ImportError, match="Numpy is required to determine the dtype" - ): - pixel_dtype(None) - - def test_reshape_pixel_array_raises(self): - """Test that reshape_pixel_array raises exception without numpy.""" - with pytest.raises(ImportError, match="Numpy is required to reshape"): - reshape_pixel_array(None, None) - - -# Tests with Numpy available -REFERENCE_DTYPE = [ - # BitsAllocated, PixelRepresentation, as_float, numpy dtype string - (1, 0, False, "uint8"), - (1, 1, False, "uint8"), - (8, 0, False, "uint8"), - (8, 1, False, "int8"), - (16, 0, False, "uint16"), - (16, 1, False, "int16"), - (32, 0, False, "uint32"), - (32, 1, False, "int32"), - (32, 0, True, "float32"), - (64, 0, True, "float64"), -] - - -@pytest.mark.skipif(not HAVE_NP, reason="Numpy is not available") -class TestNumpy_PixelDtype: - """Tests for util.pixel_dtype.""" - - def setup_method(self): - """Setup the test dataset.""" - self.ds = Dataset() 
- self.ds.file_meta = FileMetaDataset() - self.ds.file_meta.TransferSyntaxUID = ExplicitVRLittleEndian - - def test_unknown_pixel_representation_raises(self): - """Test an unknown PixelRepresentation value raises exception.""" - self.ds.BitsAllocated = 16 - with pytest.warns(UserWarning): - self.ds.PixelRepresentation = -1 - # The bracket needs to be escaped - with pytest.raises(ValueError, match=r"value of '-1' for '\(0028,0103"): - pixel_dtype(self.ds) - - self.ds.PixelRepresentation = 2 - with pytest.raises(ValueError, match=r"value of '2' for '\(0028,0103"): - pixel_dtype(self.ds) - - def test_unknown_bits_allocated_raises(self): - """Test an unknown BitsAllocated value raises exception.""" - self.ds.BitsAllocated = 0 - self.ds.PixelRepresentation = 0 - # The bracket needs to be escaped - with pytest.raises(ValueError, match=r"value of '0' for '\(0028,0100"): - pixel_dtype(self.ds) - - self.ds.BitsAllocated = 2 - with pytest.raises(ValueError, match=r"value of '2' for '\(0028,0100"): - pixel_dtype(self.ds) - - self.ds.BitsAllocated = 15 - with pytest.raises(ValueError, match=r"value of '15' for '\(0028,0100"): - pixel_dtype(self.ds) - - def test_unsupported_dtypes(self): - """Test unsupported dtypes raise exception.""" - self.ds.BitsAllocated = 24 - self.ds.PixelRepresentation = 0 - - with pytest.raises( - NotImplementedError, match="data type 'uint24' needed to contain" - ): - pixel_dtype(self.ds) - - @pytest.mark.parametrize("bits, pixel_repr, as_float, dtype", REFERENCE_DTYPE) - def test_supported_dtypes(self, bits, pixel_repr, as_float, dtype): - """Test supported dtypes.""" - self.ds.BitsAllocated = bits - self.ds.PixelRepresentation = pixel_repr - # Correct for endianness of system - ref_dtype = np.dtype(dtype) - endianness = self.ds.file_meta.TransferSyntaxUID.is_little_endian - if endianness != (byteorder == "little"): - ref_dtype = ref_dtype.newbyteorder("S") - - assert ref_dtype == pixel_dtype(self.ds, as_float=as_float) - - def test_byte_swapping(self): - """Test that the endianness of the system is taken into account.""" - # The main problem is that our testing environments are probably - # all little endian, but we'll try our best - self.ds.BitsAllocated = 16 - self.ds.PixelRepresentation = 0 - - # explicit little - meta = self.ds.file_meta - - # < is little, = is native, > is big - if byteorder == "little": - self.ds._read_little = True - assert pixel_dtype(self.ds).byteorder in ["<", "="] - meta.TransferSyntaxUID = ExplicitVRBigEndian - self.ds._read_little = False - assert pixel_dtype(self.ds).byteorder == ">" - elif byteorder == "big": - self.ds._read_little = True - assert pixel_dtype(self.ds).byteorder == "<" - meta.TransferSyntaxUID = ExplicitVRBigEndian - self.ds._read_little = False - assert pixel_dtype(self.ds).byteorder in [">", "="] - - def test_no_endianness_raises(self): - ds = Dataset() - ds.BitsAllocated = 8 - ds.PixelRepresentation = 1 - msg = ( - "Unable to determine the endianness of the dataset, please set " - "an appropriate Transfer Syntax UID in 'Dataset.file_meta'" - ) - with pytest.raises(AttributeError, match=msg): - pixel_dtype(ds) - - -if HAVE_NP: - RESHAPE_ARRAYS = { - "reference": np.asarray( - [ - [ # Frame 1 - [[1, 9, 17], [2, 10, 18], [3, 11, 19], [4, 12, 20], [5, 13, 21]], - [[2, 10, 18], [3, 11, 19], [4, 12, 20], [5, 13, 21], [6, 14, 22]], - [[3, 11, 19], [4, 12, 20], [5, 13, 21], [6, 14, 22], [7, 15, 23]], - [[4, 12, 20], [5, 13, 21], [6, 14, 22], [7, 15, 23], [8, 16, 24]], - ], - [ # Frame 2 - [ - [25, 33, 41], - [26, 34, 42], - [27, 
35, 43], - [28, 36, 44], - [29, 37, 45], - ], - [ - [26, 34, 42], - [27, 35, 43], - [28, 36, 44], - [29, 37, 45], - [30, 38, 46], - ], - [ - [27, 35, 43], - [28, 36, 44], - [29, 37, 45], - [30, 38, 46], - [31, 39, 47], - ], - [ - [28, 36, 44], - [29, 37, 45], - [30, 38, 46], - [31, 39, 47], - [32, 40, 48], - ], - ], - ] - ), - "1frame_1sample": np.asarray( - [1, 2, 3, 4, 5, 2, 3, 4, 5, 6, 3, 4, 5, 6, 7, 4, 5, 6, 7, 8] - ), - "2frame_1sample": np.asarray( - [ - 1, - 2, - 3, - 4, - 5, - 2, - 3, - 4, - 5, - 6, # Frame 1 - 3, - 4, - 5, - 6, - 7, - 4, - 5, - 6, - 7, - 8, - 25, - 26, - 27, - 28, - 29, - 26, - 27, - 28, - 29, - 30, # Frame 2 - 27, - 28, - 29, - 30, - 31, - 28, - 29, - 30, - 31, - 32, - ] - ), - "1frame_3sample_0config": np.asarray( - [ - 1, - 9, - 17, - 2, - 10, - 18, - 3, - 11, - 19, - 4, - 12, - 20, - 5, - 13, - 21, - 2, - 10, - 18, - 3, - 11, - 19, - 4, - 12, - 20, - 5, - 13, - 21, - 6, - 14, - 22, - 3, - 11, - 19, - 4, - 12, - 20, - 5, - 13, - 21, - 6, - 14, - 22, - 7, - 15, - 23, - 4, - 12, - 20, - 5, - 13, - 21, - 6, - 14, - 22, - 7, - 15, - 23, - 8, - 16, - 24, - ] - ), - "1frame_3sample_1config": np.asarray( - [ - 1, - 2, - 3, - 4, - 5, - 2, - 3, - 4, - 5, - 6, # Red - 3, - 4, - 5, - 6, - 7, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 10, - 11, - 12, - 13, - 14, # Green - 11, - 12, - 13, - 14, - 15, - 12, - 13, - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 18, - 19, - 20, - 21, - 22, # Blue - 19, - 20, - 21, - 22, - 23, - 20, - 21, - 22, - 23, - 24, - ] - ), - "2frame_3sample_0config": np.asarray( - [ - 1, - 9, - 17, - 2, - 10, - 18, - 3, - 11, - 19, - 4, - 12, - 20, # Frame 1 - 5, - 13, - 21, - 2, - 10, - 18, - 3, - 11, - 19, - 4, - 12, - 20, - 5, - 13, - 21, - 6, - 14, - 22, - 3, - 11, - 19, - 4, - 12, - 20, - 5, - 13, - 21, - 6, - 14, - 22, - 7, - 15, - 23, - 4, - 12, - 20, - 5, - 13, - 21, - 6, - 14, - 22, - 7, - 15, - 23, - 8, - 16, - 24, - 25, - 33, - 41, - 26, - 34, - 42, - 27, - 35, - 43, - 28, - 36, - 44, # Frame 2 - 29, - 37, - 45, - 26, - 34, - 42, - 27, - 35, - 43, - 28, - 36, - 44, - 29, - 37, - 45, - 30, - 38, - 46, - 27, - 35, - 43, - 28, - 36, - 44, - 29, - 37, - 45, - 30, - 38, - 46, - 31, - 39, - 47, - 28, - 36, - 44, - 29, - 37, - 45, - 30, - 38, - 46, - 31, - 39, - 47, - 32, - 40, - 48, - ] - ), - "2frame_3sample_1config": np.asarray( - [ - 1, - 2, - 3, - 4, - 5, - 2, - 3, - 4, - 5, - 6, # Frame 1, red - 3, - 4, - 5, - 6, - 7, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 10, - 11, - 12, - 13, - 14, # Frame 1, green - 11, - 12, - 13, - 14, - 15, - 12, - 13, - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 18, - 19, - 20, - 21, - 22, # Frame 1, blue - 19, - 20, - 21, - 22, - 23, - 20, - 21, - 22, - 23, - 24, - 25, - 26, - 27, - 28, - 29, - 26, - 27, - 28, - 29, - 30, # Frame 2, red - 27, - 28, - 29, - 30, - 31, - 28, - 29, - 30, - 31, - 32, - 33, - 34, - 35, - 36, - 37, - 34, - 35, - 36, - 37, - 38, # Frame 2, green - 35, - 36, - 37, - 38, - 39, - 36, - 37, - 38, - 39, - 40, - 41, - 42, - 43, - 44, - 45, - 42, - 43, - 44, - 45, - 46, # Frame 2, blue - 43, - 44, - 45, - 46, - 47, - 44, - 45, - 46, - 47, - 48, - ] - ), - } - - -@pytest.mark.skipif(not HAVE_NP, reason="Numpy is not available") -class TestNumpy_ReshapePixelArray: - """Tests for util.reshape_pixel_array.""" - - def setup_method(self): - """Setup the test dataset.""" - self.ds = Dataset() - self.ds.file_meta = FileMetaDataset() - self.ds.file_meta.TransferSyntaxUID = ExplicitVRLittleEndian - self.ds.Rows = 4 - self.ds.Columns = 5 - - # Expected output 
ref_#frames_#samples - self.ref_1_1 = RESHAPE_ARRAYS["reference"][0, :, :, 0] - self.ref_1_3 = RESHAPE_ARRAYS["reference"][0] - self.ref_2_1 = RESHAPE_ARRAYS["reference"][:, :, :, 0] - self.ref_2_3 = RESHAPE_ARRAYS["reference"] - - def test_reference_1frame_1sample(self): - """Test the 1 frame 1 sample/pixel reference array is as expected.""" - # (rows, columns) - assert (4, 5) == self.ref_1_1.shape - assert np.array_equal( - self.ref_1_1, - np.asarray( - [[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7], [4, 5, 6, 7, 8]] - ), - ) - - def test_reference_1frame_3sample(self): - """Test the 1 frame 3 sample/pixel reference array is as expected.""" - # (rows, columns, planes) - assert (4, 5, 3) == self.ref_1_3.shape - - # Red channel - assert np.array_equal( - self.ref_1_3[:, :, 0], - np.asarray( - [[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7], [4, 5, 6, 7, 8]] - ), - ) - # Green channel - assert np.array_equal( - self.ref_1_3[:, :, 1], - np.asarray( - [ - [9, 10, 11, 12, 13], - [10, 11, 12, 13, 14], - [11, 12, 13, 14, 15], - [12, 13, 14, 15, 16], - ] - ), - ) - # Blue channel - assert np.array_equal( - self.ref_1_3[:, :, 2], - np.asarray( - [ - [17, 18, 19, 20, 21], - [18, 19, 20, 21, 22], - [19, 20, 21, 22, 23], - [20, 21, 22, 23, 24], - ] - ), - ) - - def test_reference_2frame_1sample(self): - """Test the 2 frame 1 sample/pixel reference array is as expected.""" - # (nr frames, rows, columns) - assert (2, 4, 5) == self.ref_2_1.shape - - # Frame 1 - assert np.array_equal( - self.ref_2_1[0, :, :], - np.asarray( - [[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7], [4, 5, 6, 7, 8]] - ), - ) - # Frame 2 - assert np.array_equal( - self.ref_2_1[1, :, :], - np.asarray( - [ - [25, 26, 27, 28, 29], - [26, 27, 28, 29, 30], - [27, 28, 29, 30, 31], - [28, 29, 30, 31, 32], - ] - ), - ) - - def test_reference_2frame_3sample(self): - """Test the 2 frame 3 sample/pixel reference array is as expected.""" - # (nr frames, row, columns, planes) - assert (2, 4, 5, 3) == self.ref_2_3.shape - - # Red channel, frame 1 - assert np.array_equal( - self.ref_2_3[0, :, :, 0], - np.asarray( - [[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7], [4, 5, 6, 7, 8]] - ), - ) - # Green channel, frame 2 - assert np.array_equal( - self.ref_2_3[1, :, :, 1], - np.asarray( - [ - [33, 34, 35, 36, 37], - [34, 35, 36, 37, 38], - [35, 36, 37, 38, 39], - [36, 37, 38, 39, 40], - ] - ), - ) - - def test_1frame_1sample(self): - """Test reshaping 1 frame, 1 sample/pixel.""" - self.ds.SamplesPerPixel = 1 - arr = reshape_pixel_array(self.ds, RESHAPE_ARRAYS["1frame_1sample"]) - assert (4, 5) == arr.shape - assert np.array_equal(arr, self.ref_1_1) - - def test_1frame_3sample_0conf(self): - """Test reshaping 1 frame, 3 sample/pixel for 0 planar config.""" - self.ds.NumberOfFrames = 1 - self.ds.SamplesPerPixel = 3 - self.ds.PlanarConfiguration = 0 - arr = reshape_pixel_array(self.ds, RESHAPE_ARRAYS["1frame_3sample_0config"]) - assert (4, 5, 3) == arr.shape - assert np.array_equal(arr, self.ref_1_3) - - def test_1frame_3sample_1conf(self): - """Test reshaping 1 frame, 3 sample/pixel for 1 planar config.""" - self.ds.NumberOfFrames = 1 - self.ds.SamplesPerPixel = 3 - self.ds.PlanarConfiguration = 1 - arr = reshape_pixel_array(self.ds, RESHAPE_ARRAYS["1frame_3sample_1config"]) - assert (4, 5, 3) == arr.shape - assert np.array_equal(arr, self.ref_1_3) - - def test_2frame_1sample(self): - """Test reshaping 2 frame, 1 sample/pixel.""" - self.ds.NumberOfFrames = 2 - self.ds.SamplesPerPixel = 1 - arr = reshape_pixel_array(self.ds, 
RESHAPE_ARRAYS["2frame_1sample"]) - assert (2, 4, 5) == arr.shape - assert np.array_equal(arr, self.ref_2_1) - - def test_2frame_3sample_0conf(self): - """Test reshaping 2 frame, 3 sample/pixel for 0 planar config.""" - self.ds.NumberOfFrames = 2 - self.ds.SamplesPerPixel = 3 - self.ds.PlanarConfiguration = 0 - arr = reshape_pixel_array(self.ds, RESHAPE_ARRAYS["2frame_3sample_0config"]) - assert (2, 4, 5, 3) == arr.shape - assert np.array_equal(arr, self.ref_2_3) - - def test_2frame_3sample_1conf(self): - """Test reshaping 2 frame, 3 sample/pixel for 1 planar config.""" - self.ds.NumberOfFrames = 2 - self.ds.SamplesPerPixel = 3 - self.ds.PlanarConfiguration = 1 - arr = reshape_pixel_array(self.ds, RESHAPE_ARRAYS["2frame_3sample_1config"]) - assert (2, 4, 5, 3) == arr.shape - assert np.array_equal(arr, self.ref_2_3) - - def test_compressed_syntaxes_0conf(self): - """Test the compressed syntaxes that are always 0 planar conf.""" - for uid in [ - "1.2.840.10008.1.2.4.50", - "1.2.840.10008.1.2.4.57", - "1.2.840.10008.1.2.4.70", - "1.2.840.10008.1.2.4.90", - "1.2.840.10008.1.2.4.91", - ]: - self.ds.file_meta.TransferSyntaxUID = uid - self.ds.PlanarConfiguration = 1 - self.ds.NumberOfFrames = 1 - self.ds.SamplesPerPixel = 3 - - arr = reshape_pixel_array(self.ds, RESHAPE_ARRAYS["1frame_3sample_0config"]) - assert (4, 5, 3) == arr.shape - assert np.array_equal(arr, self.ref_1_3) - - def test_compressed_syntaxes_1conf(self): - """Test the compressed syntaxes that are always 1 planar conf.""" - for uid in ["1.2.840.10008.1.2.5"]: - self.ds.file_meta.TransferSyntaxUID = uid - self.ds.PlanarConfiguration = 0 - self.ds.NumberOfFrames = 1 - self.ds.SamplesPerPixel = 3 - - arr = reshape_pixel_array(self.ds, RESHAPE_ARRAYS["1frame_3sample_1config"]) - assert (4, 5, 3) == arr.shape - assert np.array_equal(arr, self.ref_1_3) - - def test_uncompressed_syntaxes(self): - """Test that uncompressed syntaxes use the dataset planar conf.""" - for uid in UncompressedTransferSyntaxes: - self.ds.file_meta.TransferSyntaxUID = uid - self.ds.PlanarConfiguration = 0 - self.ds.NumberOfFrames = 1 - self.ds.SamplesPerPixel = 3 - - arr = reshape_pixel_array(self.ds, RESHAPE_ARRAYS["1frame_3sample_0config"]) - assert (4, 5, 3) == arr.shape - assert np.array_equal(arr, self.ref_1_3) - - self.ds.PlanarConfiguration = 1 - arr = reshape_pixel_array(self.ds, RESHAPE_ARRAYS["1frame_3sample_1config"]) - assert (4, 5, 3) == arr.shape - assert np.array_equal(arr, self.ref_1_3) - - def test_invalid_nr_frames_warns(self): - """Test an invalid Number of Frames value shows an warning.""" - self.ds.SamplesPerPixel = 1 - self.ds.NumberOfFrames = 0 - # Need to escape brackets - with pytest.warns(UserWarning, match=r"value of 0 for \(0028,0008\)"): - reshape_pixel_array(self.ds, RESHAPE_ARRAYS["1frame_1sample"]) - - def test_invalid_samples_raises(self): - """Test an invalid Samples per Pixel value raises exception.""" - self.ds.SamplesPerPixel = 0 - # Need to escape brackets - with pytest.raises(ValueError, match=r"value of 0 for \(0028,0002\)"): - reshape_pixel_array(self.ds, RESHAPE_ARRAYS["1frame_1sample"]) - - def test_invalid_planar_conf_raises(self): - self.ds.SamplesPerPixel = 3 - self.ds.PlanarConfiguration = 2 - # Need to escape brackets - with pytest.raises(ValueError, match=r"value of 2 for \(0028,0006\)"): - reshape_pixel_array(self.ds, RESHAPE_ARRAYS["1frame_3sample_0config"]) - - -@pytest.mark.skipif(not HAVE_NP, reason="Numpy is not available") -class TestNumpy_ConvertColourSpace: - """Tests for util.convert_color_space.""" - 
- def test_unknown_current_raises(self): - """Test an unknown current color space raises exception.""" - with pytest.raises( - NotImplementedError, match="Conversion from TEST to RGB is not suppo" - ): - convert_color_space(None, "TEST", "RGB") - - def test_unknown_desired_raises(self): - """Test an unknown desdired color space raises exception.""" - with pytest.raises( - NotImplementedError, match="Conversion from RGB to TEST is not suppo" - ): - convert_color_space(None, "RGB", "TEST") - - @pytest.mark.parametrize( - "current, desired", - [ - ("RGB", "RGB"), - ("YBR_FULL", "YBR_FULL"), - ("YBR_FULL", "YBR_FULL_422"), - ("YBR_FULL_422", "YBR_FULL_422"), - ("YBR_FULL_422", "YBR_FULL"), - ], - ) - def test_current_is_desired(self, current, desired): - """Test that the array is unchanged when current matches desired.""" - arr = np.ones((2, 3)) - assert np.array_equal(arr, convert_color_space(arr, current, desired)) - - def test_rgb_ybr_rgb_single_frame(self): - """Test round trip conversion of single framed pixel data.""" - ds = dcmread(RGB_8_3_1F) - - arr = ds.pixel_array - assert (255, 0, 0) == tuple(arr[5, 50, :]) - assert (255, 128, 128) == tuple(arr[15, 50, :]) - assert (0, 255, 0) == tuple(arr[25, 50, :]) - assert (128, 255, 128) == tuple(arr[35, 50, :]) - assert (0, 0, 255) == tuple(arr[45, 50, :]) - assert (128, 128, 255) == tuple(arr[55, 50, :]) - assert (0, 0, 0) == tuple(arr[65, 50, :]) - assert (64, 64, 64) == tuple(arr[75, 50, :]) - assert (192, 192, 192) == tuple(arr[85, 50, :]) - assert (255, 255, 255) == tuple(arr[95, 50, :]) - - ybr = convert_color_space(arr, "RGB", "YBR_FULL") - assert (76, 85, 255) == tuple(ybr[5, 50, :]) - assert (166, 107, 192) == tuple(ybr[15, 50, :]) - assert (150, 44, 21) == tuple(ybr[25, 50, :]) - assert (203, 86, 75) == tuple(ybr[35, 50, :]) - assert (29, 255, 107) == tuple(ybr[45, 50, :]) - assert (142, 192, 118) == tuple(ybr[55, 50, :]) - assert (0, 128, 128) == tuple(ybr[65, 50, :]) - assert (64, 128, 128) == tuple(ybr[75, 50, :]) - assert (192, 128, 128) == tuple(ybr[85, 50, :]) - assert (255, 128, 128) == tuple(ybr[95, 50, :]) - - # Round trip -> rounding errors get compounded - rgb = convert_color_space(ybr, "YBR_FULL", "RGB") - # All pixels within +/- 1 units - assert np.allclose(rgb, arr, atol=1) - assert rgb.shape == arr.shape - - def test_rgb_ybr_rgb_multi_frame(self): - """Test round trip conversion of multi-framed pixel data.""" - ds = dcmread(RGB_8_3_2F) - - arr = ds.pixel_array - assert (255, 0, 0) == tuple(arr[0, 5, 50, :]) - assert (255, 128, 128) == tuple(arr[0, 15, 50, :]) - assert (0, 255, 0) == tuple(arr[0, 25, 50, :]) - assert (128, 255, 128) == tuple(arr[0, 35, 50, :]) - assert (0, 0, 255) == tuple(arr[0, 45, 50, :]) - assert (128, 128, 255) == tuple(arr[0, 55, 50, :]) - assert (0, 0, 0) == tuple(arr[0, 65, 50, :]) - assert (64, 64, 64) == tuple(arr[0, 75, 50, :]) - assert (192, 192, 192) == tuple(arr[0, 85, 50, :]) - assert (255, 255, 255) == tuple(arr[0, 95, 50, :]) - # Frame 2 is frame 1 inverted - assert np.array_equal((2**ds.BitsAllocated - 1) - arr[1], arr[0]) - - ybr = convert_color_space(arr, "RGB", "YBR_FULL") - assert (76, 85, 255) == tuple(ybr[0, 5, 50, :]) - assert (166, 107, 192) == tuple(ybr[0, 15, 50, :]) - assert (150, 44, 21) == tuple(ybr[0, 25, 50, :]) - assert (203, 86, 75) == tuple(ybr[0, 35, 50, :]) - assert (29, 255, 107) == tuple(ybr[0, 45, 50, :]) - assert (142, 192, 118) == tuple(ybr[0, 55, 50, :]) - assert (0, 128, 128) == tuple(ybr[0, 65, 50, :]) - assert (64, 128, 128) == tuple(ybr[0, 75, 50, :]) - 
assert (192, 128, 128) == tuple(ybr[0, 85, 50, :]) - assert (255, 128, 128) == tuple(ybr[0, 95, 50, :]) - # Frame 2 - assert (179, 171, 1) == tuple(ybr[1, 5, 50, :]) - assert (89, 149, 65) == tuple(ybr[1, 15, 50, :]) - assert (105, 212, 235) == tuple(ybr[1, 25, 50, :]) - assert (52, 170, 181) == tuple(ybr[1, 35, 50, :]) - assert (226, 1, 149) == tuple(ybr[1, 45, 50, :]) - assert (113, 65, 138) == tuple(ybr[1, 55, 50, :]) - assert (255, 128, 128) == tuple(ybr[1, 65, 50, :]) - assert (191, 128, 128) == tuple(ybr[1, 75, 50, :]) - assert (63, 128, 128) == tuple(ybr[1, 85, 50, :]) - assert (0, 128, 128) == tuple(ybr[1, 95, 50, :]) - - # Round trip -> rounding errors get compounded - rgb = convert_color_space(ybr, "YBR_FULL", "RGB") - # All pixels within +/- 1 units - assert np.allclose(rgb, arr, atol=1) - assert rgb.shape == arr.shape - - def test_frame_by_frame(self): - """Test processing frame-by-frame.""" - ds = dcmread(RGB_8_3_2F) - - arr = ds.pixel_array - ybr = convert_color_space(arr, "RGB", "YBR_FULL", per_frame=True) - assert (76, 85, 255) == tuple(ybr[0, 5, 50, :]) - assert (166, 107, 192) == tuple(ybr[0, 15, 50, :]) - assert (150, 44, 21) == tuple(ybr[0, 25, 50, :]) - assert (203, 86, 75) == tuple(ybr[0, 35, 50, :]) - assert (29, 255, 107) == tuple(ybr[0, 45, 50, :]) - assert (142, 192, 118) == tuple(ybr[0, 55, 50, :]) - assert (0, 128, 128) == tuple(ybr[0, 65, 50, :]) - assert (64, 128, 128) == tuple(ybr[0, 75, 50, :]) - assert (192, 128, 128) == tuple(ybr[0, 85, 50, :]) - assert (255, 128, 128) == tuple(ybr[0, 95, 50, :]) - # Frame 2 - assert (179, 171, 1) == tuple(ybr[1, 5, 50, :]) - assert (89, 149, 65) == tuple(ybr[1, 15, 50, :]) - assert (105, 212, 235) == tuple(ybr[1, 25, 50, :]) - assert (52, 170, 181) == tuple(ybr[1, 35, 50, :]) - assert (226, 1, 149) == tuple(ybr[1, 45, 50, :]) - assert (113, 65, 138) == tuple(ybr[1, 55, 50, :]) - assert (255, 128, 128) == tuple(ybr[1, 65, 50, :]) - assert (191, 128, 128) == tuple(ybr[1, 75, 50, :]) - assert (63, 128, 128) == tuple(ybr[1, 85, 50, :]) - assert (0, 128, 128) == tuple(ybr[1, 95, 50, :]) +with pytest.warns(DeprecationWarning): + from pydicom.pixel_data_handlers.util import dtype_corrected_for_endianness @pytest.mark.skipif(not HAVE_NP, reason="Numpy is not available") @@ -1085,1844 +46,94 @@ def test_no_endian_raises(self): dtype_corrected_for_endianness(None, None) -REFERENCE_LENGTH = [ - # (frames, rows, cols, samples), bit depth, - # result in (bytes, pixels, ybr_bytes) - # YBR can only be 3 samples/px and > 1 bit depth - # No 'NumberOfFrames' in dataset - ((0, 0, 0, 0), 1, (0, 0, None)), - ((0, 1, 1, 1), 1, (1, 1, None)), # 1 bit -> 1 byte - ((0, 1, 1, 3), 1, (1, 3, None)), # 3 bits -> 1 byte - ((0, 1, 3, 3), 1, (2, 9, None)), # 9 bits -> 2 bytes - ((0, 2, 2, 1), 1, (1, 4, None)), # 4 bits -> 1 byte - ((0, 2, 4, 1), 1, (1, 8, None)), # 8 bits -> 1 byte - ((0, 3, 3, 1), 1, (2, 9, None)), # 9 bits -> 2 bytes - ((0, 512, 512, 1), 1, (32768, 262144, None)), # Typical length - ((0, 512, 512, 3), 1, (98304, 786432, None)), - ((0, 0, 0, 0), 8, (0, 0, None)), - ((0, 1, 1, 1), 8, (1, 1, None)), # Odd length - ((0, 9, 1, 1), 8, (9, 9, None)), # Odd length - ((0, 1, 2, 1), 8, (2, 2, None)), # Even length - ((0, 512, 512, 1), 8, (262144, 262144, None)), - ((0, 512, 512, 3), 8, (786432, 786432, 524288)), - ((0, 0, 0, 0), 16, (0, 0, None)), - ((0, 1, 1, 1), 16, (2, 1, None)), # 16 bit data can't be odd length - ((0, 1, 2, 1), 16, (4, 2, None)), - ((0, 512, 512, 1), 16, (524288, 262144, None)), - ((0, 512, 512, 3), 16, (1572864, 786432, 
1048576)), - ((0, 0, 0, 0), 32, (0, 0, None)), - ((0, 1, 1, 1), 32, (4, 1, None)), # 32 bit data can't be odd length - ((0, 1, 2, 1), 32, (8, 2, None)), - ((0, 512, 512, 1), 32, (1048576, 262144, None)), - ((0, 512, 512, 3), 32, (3145728, 786432, 2097152)), - # NumberOfFrames odd - ((3, 0, 0, 0), 1, (0, 0, None)), - ((3, 1, 1, 1), 1, (1, 3, None)), - ((3, 1, 1, 3), 1, (2, 9, None)), - ((3, 1, 3, 3), 1, (4, 27, None)), - ((3, 2, 4, 1), 1, (3, 24, None)), - ((3, 2, 2, 1), 1, (2, 12, None)), - ((3, 3, 3, 1), 1, (4, 27, None)), - ((3, 512, 512, 1), 1, (98304, 786432, None)), - ((3, 512, 512, 3), 1, (294912, 2359296, 196608)), - ((3, 0, 0, 0), 8, (0, 0, None)), - ((3, 1, 1, 1), 8, (3, 3, None)), - ((3, 9, 1, 1), 8, (27, 27, None)), - ((3, 1, 2, 1), 8, (6, 6, None)), - ((3, 512, 512, 1), 8, (786432, 786432, None)), - ((3, 512, 512, 3), 8, (2359296, 2359296, 1572864)), - ((3, 0, 0, 0), 16, (0, 0, None)), - ((3, 512, 512, 1), 16, (1572864, 786432, None)), - ((3, 512, 512, 3), 16, (4718592, 2359296, 3145728)), - ((3, 0, 0, 0), 32, (0, 0, None)), - ((3, 512, 512, 1), 32, (3145728, 786432, None)), - ((3, 512, 512, 3), 32, (9437184, 2359296, 6291456)), - # NumberOfFrames even - ((4, 0, 0, 0), 1, (0, 0, None)), - ((4, 1, 1, 1), 1, (1, 4, None)), - ((4, 1, 1, 3), 1, (2, 12, None)), - ((4, 1, 3, 3), 1, (5, 36, None)), - ((4, 2, 4, 1), 1, (4, 32, None)), - ((4, 2, 2, 1), 1, (2, 16, None)), - ((4, 3, 3, 1), 1, (5, 36, None)), - ((4, 512, 512, 1), 1, (131072, 1048576, None)), - ((4, 512, 512, 3), 1, (393216, 3145728, 262144)), - ((4, 0, 0, 0), 8, (0, 0, None)), - ((4, 512, 512, 1), 8, (1048576, 1048576, None)), - ((4, 512, 512, 3), 8, (3145728, 3145728, 2097152)), - ((4, 0, 0, 0), 16, (0, 0, None)), - ((4, 512, 512, 1), 16, (2097152, 1048576, None)), - ((4, 512, 512, 3), 16, (6291456, 3145728, 4194304)), - ((4, 0, 0, 0), 32, (0, 0, None)), - ((4, 512, 512, 1), 32, (4194304, 1048576, None)), - ((4, 512, 512, 3), 32, (12582912, 3145728, 8388608)), -] - - -class TestGetExpectedLength: - """Tests for util.get_expected_length.""" - - @pytest.mark.parametrize("shape, bits, length", REFERENCE_LENGTH) - def test_length_in_bytes(self, shape, bits, length): - """Test get_expected_length(ds, unit='bytes').""" - ds = Dataset() - ds.PhotometricInterpretation = "MONOCHROME2" - ds.Rows = shape[1] - ds.Columns = shape[2] - ds.BitsAllocated = bits - if shape[0] != 0: - ds.NumberOfFrames = shape[0] - ds.SamplesPerPixel = shape[3] - - assert length[0] == get_expected_length(ds, unit="bytes") - - @pytest.mark.parametrize("shape, bits, length", REFERENCE_LENGTH) - def test_length_in_pixels(self, shape, bits, length): - """Test get_expected_length(ds, unit='pixels').""" - ds = Dataset() - ds.PhotometricInterpretation = "MONOCHROME2" - ds.Rows = shape[1] - ds.Columns = shape[2] - ds.BitsAllocated = bits - if shape[0] != 0: - ds.NumberOfFrames = shape[0] - ds.SamplesPerPixel = shape[3] - - assert length[1] == get_expected_length(ds, unit="pixels") - - @pytest.mark.parametrize("shape, bits, length", REFERENCE_LENGTH) - def test_length_ybr_422(self, shape, bits, length): - """Test get_expected_length for YBR_FULL_422.""" - if shape[3] != 3 or bits == 1: - return - - ds = Dataset() - ds.PhotometricInterpretation = "YBR_FULL_422" - ds.Rows = shape[1] - ds.Columns = shape[2] - ds.BitsAllocated = bits - if shape[0] != 0: - ds.NumberOfFrames = shape[0] - ds.SamplesPerPixel = shape[3] - - assert length[2] == get_expected_length(ds, unit="bytes") - - -@pytest.mark.skipif(not HAVE_NP, reason="Numpy is not available") -class 
TestNumpy_ModalityLUT: - """Tests for util.apply_modality_lut().""" - - def test_slope_intercept(self): - """Test the rescale slope/intercept transform.""" - ds = dcmread(MOD_16) - assert 1 == ds.RescaleSlope - assert -1024 == ds.RescaleIntercept - arr = ds.pixel_array - out = apply_modality_lut(arr, ds) - assert out.flags.writeable - assert np.float64 == out.dtype - - assert np.array_equal(arr - 1024, out) - - ds.RescaleSlope = 2.5 - ds.RescaleIntercept = -2048 - out = apply_modality_lut(arr, ds) - assert np.array_equal(arr * 2.5 - 2048, out) - - def test_lut_sequence(self): - """Test the LUT Sequence transform.""" - ds = dcmread(MOD_16_SEQ) - seq = ds.ModalityLUTSequence[0] - assert [4096, -2048, 16] == seq.LUTDescriptor - arr = ds.pixel_array - assert -2048 == arr.min() - assert 4095 == arr.max() - out = apply_modality_lut(arr, ds) - - # IV > 2047 -> LUT[4095] - mapped_pixels = arr > 2047 - assert seq.LUTData[-1] == out[mapped_pixels][0] - assert (seq.LUTData[-1] == out[mapped_pixels]).all() - assert out.flags.writeable - assert out.dtype == np.uint16 - - assert [65535, 65535, 49147, 49147, 65535] == list(out[0, 50:55]) - assert [65535, 65535, 65535, 65535, 65535] == list(out[50, 50:55]) - assert [65535, 65535, 65535, 65535, 65535] == list(out[100, 50:55]) - assert [65535, 65535, 49147, 49147, 65535] == list(out[150, 50:55]) - assert [65535, 65535, 49147, 49147, 65535] == list(out[200, 50:55]) - assert 39321 == out[185, 340] - assert 45867 == out[185, 385] - assert 52428 == out[228, 385] - assert 58974 == out[291, 385] - - def test_lut_sequence_zero_entries(self): - """Test that 0 entries is interpreted correctly.""" - # LUTDescriptor[0] of 0 -> 65536, but only 4096 entries so any - # attempt to access LUTData[4096] or higher will raise IndexError - ds = dcmread(MOD_16_SEQ) - seq = ds.ModalityLUTSequence[0] - seq.LUTDescriptor = [0, 0, 16] - assert 4096 == len(seq.LUTData) - arr = np.asarray([0, 4095, 4096, 65535]) - msg = r"index 4096 is out of bounds" - with pytest.raises(IndexError, match=msg): - apply_modality_lut(arr, ds) - - # LUTData with 65536 entries - seq.LUTData = [0] * 65535 + [1] - out = apply_modality_lut(arr, ds) - assert [0, 0, 0, 1] == list(out) - - def test_unchanged(self): - """Test no modality LUT transform.""" - ds = dcmread(MOD_16) - del ds.RescaleSlope - del ds.RescaleIntercept - arr = ds.pixel_array - out = apply_modality_lut(arr, ds) - assert arr is out - - ds.ModalityLUTSequence = [] - out = apply_modality_lut(arr, ds) - assert arr is out - - def test_lutdata_ow(self): - """Test LUT Data with VR OW.""" - ds = dcmread(MOD_16_SEQ) - assert ds.original_encoding == (False, True) - seq = ds.ModalityLUTSequence[0] - assert [4096, -2048, 16] == seq.LUTDescriptor - seq["LUTData"].VR = "OW" - seq.LUTData = pack("<4096H", *seq.LUTData) - arr = ds.pixel_array - assert -2048 == arr.min() - assert 4095 == arr.max() - out = apply_modality_lut(arr, ds) - - # IV > 2047 -> LUT[4095] - mapped_pixels = arr > 2047 - assert 65535 == out[mapped_pixels][0] - assert (65535 == out[mapped_pixels]).all() - assert out.flags.writeable - assert out.dtype == np.uint16 - - assert [65535, 65535, 49147, 49147, 65535] == list(out[0, 50:55]) - assert [65535, 65535, 65535, 65535, 65535] == list(out[50, 50:55]) - assert [65535, 65535, 65535, 65535, 65535] == list(out[100, 50:55]) - assert [65535, 65535, 49147, 49147, 65535] == list(out[150, 50:55]) - assert [65535, 65535, 49147, 49147, 65535] == list(out[200, 50:55]) - assert 39321 == out[185, 340] - assert 45867 == out[185, 385] - assert 52428 == 
out[228, 385] - assert 58974 == out[291, 385] - - def test_no_endianness_raises(self): - ds = dcmread(MOD_16_SEQ) - assert ds.original_encoding == (False, True) - seq = ds.ModalityLUTSequence[0] - assert [4096, -2048, 16] == seq.LUTDescriptor - seq["LUTData"].VR = "OW" - seq.LUTData = pack("<4096H", *seq.LUTData) - arr = ds.pixel_array - del ds.file_meta - ds._read_little = None - msg = ( - "Unable to determine the endianness of the dataset, please set " - "an appropriate Transfer Syntax UID in 'FileDataset.file_meta'" - ) - with pytest.raises(AttributeError, match=msg): - apply_modality_lut(arr, ds) - - def test_file_meta(self): - """Test using file meta to determine endianness""" - ds = dcmread(MOD_16_SEQ) - seq = ds.ModalityLUTSequence[0] - assert [4096, -2048, 16] == seq.LUTDescriptor - seq["LUTData"].VR = "OW" - seq.LUTData = pack("<4096H", *seq.LUTData) - arr = ds.pixel_array - ds._read_little = None - out = apply_modality_lut(arr, ds) - - assert 39321 == out[185, 340] - assert 45867 == out[185, 385] - assert 52428 == out[228, 385] - assert 58974 == out[291, 385] - - def test_original_encoding(self): - """Test using original encoding to determine endianness""" - ds = dcmread(MOD_16_SEQ) - seq = ds.ModalityLUTSequence[0] - assert [4096, -2048, 16] == seq.LUTDescriptor - seq["LUTData"].VR = "OW" - seq.LUTData = pack("<4096H", *seq.LUTData) - arr = ds.pixel_array - del ds.file_meta - assert ds.original_encoding == (False, True) - out = apply_modality_lut(arr, ds) - - assert 39321 == out[185, 340] - assert 45867 == out[185, 385] - assert 52428 == out[228, 385] - assert 58974 == out[291, 385] - - -@pytest.mark.skipif(not HAVE_NP, reason="Numpy is not available") -class TestNumpy_PaletteColor: - """Tests for util.apply_color_lut().""" - - def setup_method(self): - """Setup the tests""" - self.o_palette = get_palette_files("pet.dcm")[0] - self.n_palette = get_palette_files("pet.dcm")[0][:-3] + "tmp" - - def teardown_method(self): - """Teardown the tests""" - if os.path.exists(self.n_palette): - os.rename(self.n_palette, self.o_palette) - - def test_neither_ds_nor_palette_raises(self): - """Test missing `ds` and `palette` raise an exception.""" - ds = dcmread(PAL_08_256_0_16_1F) - msg = r"Either 'ds' or 'palette' is required" - with pytest.raises(ValueError, match=msg): - apply_color_lut(ds.pixel_array) - - def test_palette_unknown_raises(self, disable_value_validation): - """Test using an unknown `palette` raise an exception.""" - ds = dcmread(PAL_08_256_0_16_1F) - # Palette name - msg = r"Unknown palette 'TEST'" - with pytest.raises(ValueError, match=msg): - apply_color_lut(ds.pixel_array, palette="TEST") - - # SOP Instance UID - msg = r"Unknown palette '1.2.840.10008.1.1'" - with pytest.raises(ValueError, match=msg): - apply_color_lut(ds.pixel_array, palette="1.2.840.10008.1.1") - - def test_palette_unavailable_raises(self, disable_value_validation): - """Test using a missing `palette` raise an exception.""" - os.rename(self.o_palette, self.n_palette) - ds = dcmread(PAL_08_256_0_16_1F) - msg = r"list index out of range" - with pytest.raises(IndexError, match=msg): - apply_color_lut(ds.pixel_array, palette="PET") - - def test_supplemental_raises(self): - """Test that supplemental palette color LUT raises exception.""" - ds = dcmread(SUP_16_16_2F) - msg = ( - r"Use of this function with the Supplemental Palette Color Lookup " - r"Table Module is not currently supported" - ) - with pytest.raises(ValueError, match=msg): - apply_color_lut(ds.pixel_array, ds) - - def 
test_invalid_bit_depth_raises(self): - """Test that an invalid bit depth raises an exception.""" - ds = dcmread(PAL_08_256_0_16_1F) - ds.RedPaletteColorLookupTableDescriptor[2] = 15 - msg = r"data type ['\"]uint15['\"] not understood" - with pytest.raises(TypeError, match=msg): - apply_color_lut(ds.pixel_array, ds) - - def test_invalid_lut_bit_depth_raises(self): - """Test that an invalid LUT bit depth raises an exception.""" - ds = dcmread(PAL_08_256_0_16_1F) - ds.RedPaletteColorLookupTableData = ds.RedPaletteColorLookupTableData[:-2] - ds.GreenPaletteColorLookupTableData = ds.GreenPaletteColorLookupTableData[:-2] - ds.BluePaletteColorLookupTableData = ds.BluePaletteColorLookupTableData[:-2] - msg = ( - r"The bit depth of the LUT data '15.9' is invalid \(only 8 or 16 " - r"bits per entry allowed\)" - ) - with pytest.raises(ValueError, match=msg): - apply_color_lut(ds.pixel_array, ds) - - def test_unequal_lut_length_raises(self): - """Test that an unequal LUT lengths raise an exception.""" - ds = dcmread(PAL_08_256_0_16_1F) - ds.BluePaletteColorLookupTableData = ds.BluePaletteColorLookupTableData[:-2] - msg = r"LUT data must be the same length" - with pytest.raises(ValueError, match=msg): - apply_color_lut(ds.pixel_array, ds) - - def test_no_palette_color(self): - """Test that an unequal LUT lengths raise an exception.""" - ds = dcmread(PAL_08_256_0_16_1F) - del ds.RedPaletteColorLookupTableData - msg = r"No suitable Palette Color Lookup Table Module found" - with pytest.raises(ValueError, match=msg): - apply_color_lut(ds.pixel_array, ds) - - def test_uint08_16(self): - """Test uint8 Pixel Data with 16-bit LUT entries.""" - ds = dcmread(PAL_08_200_0_16_1F, force=True) - ds.file_meta = FileMetaDataset() - ds.file_meta.TransferSyntaxUID = ImplicitVRLittleEndian - assert 8 == ds.BitsStored - assert 16 == ds.RedPaletteColorLookupTableDescriptor[2] - arr = ds.pixel_array - orig = arr.copy() - rgb = apply_color_lut(arr, ds) - assert (480, 640, 3) == rgb.shape - assert [0, 0, 0] == list(rgb[0, 0, :]) - assert [9216, 9216, 9216] == list(rgb[0, 4, :]) - assert [18688, 18688, 18688] == list(rgb[0, 9, :]) - assert [27904, 33536, 0] == list(rgb[0, 638, :]) - assert [18688, 24320, 0] == list(rgb[479, 639, :]) - - # original `arr` is unchanged - assert np.array_equal(orig, arr) - - def test_uint08_16_2frame(self): - """Test 2 frame uint8 Pixel Data with 16-bit LUT entries.""" - ds = dcmread(PAL_08_256_0_16_2F) - assert 8 == ds.BitsStored - assert 16 == ds.RedPaletteColorLookupTableDescriptor[2] - arr = ds.pixel_array - orig = arr.copy() - rgb = apply_color_lut(arr, ds) - assert (2, 600, 800, 3) == rgb.shape - assert [9472, 15872, 24064] == list(rgb[0, 0, 0, :]) - assert [34816, 43520, 54016] == list(rgb[0, 12, 12, :]) - assert [65280, 65280, 65280] == list(rgb[0, 17, 110, :]) - assert [0, 0, 0] == list(rgb[0, 77, 103, :]) - assert [23040, 52480, 65280] == list(rgb[0, 478, 793, :]) - - # 2nd frame is inverse of 1st, so won't be coloured correctly - ref = np.asarray( - [ - [26112, 26112, 26112], - [54528, 54528, 54528], - [54528, 54528, 54528], - [16640, 16640, 16640], - [49152, 45056, 22016], - [34816, 43520, 54016], - [5632, 9984, 14848], - [62464, 2816, 2816], - [3072, 5632, 8192], - [3072, 5632, 8192], - ] - ) - assert np.array_equal(ref, rgb[1, 143:153, 355, :]) - - # original `arr` is unchanged - assert np.array_equal(orig, arr) - - def test_uint16_16_segmented_little(self): - """Test uint16 Pixel Data with 16-bit LUT entries.""" - # Endianness from file_meta - ds = dcmread(PAL_SEG_LE_16_1F) - 
assert 16 == ds.BitsStored - assert 16 == ds.RedPaletteColorLookupTableDescriptor[2] - arr = ds.pixel_array - orig = arr.copy() - rgb = apply_color_lut(arr, ds) - assert (480, 640, 3) == rgb.shape - assert [10280, 11565, 16705] == list(rgb[0, 0, :]) - assert [10280, 11565, 16705] == list(rgb[0, 320, :]) - assert [10280, 11565, 16705] == list(rgb[0, 639, :]) - assert [0, 0, 0] == list(rgb[240, 0, :]) - assert [257, 257, 257] == list(rgb[240, 320, :]) - assert [2313, 2313, 2313] == list(rgb[240, 639, :]) - assert [10280, 11565, 16705] == list(rgb[479, 0, :]) - assert [10280, 11565, 16705] == list(rgb[479, 320, :]) - assert [10280, 11565, 16705] == list(rgb[479, 639, :]) - - assert (orig == arr).all() - - # Endianness from original encoding - ds = dcmread(PAL_SEG_LE_16_1F) - assert 16 == ds.BitsStored - assert 16 == ds.RedPaletteColorLookupTableDescriptor[2] - arr = ds.pixel_array - orig = arr.copy() - del ds.file_meta - rgb = apply_color_lut(arr, ds) - assert (480, 640, 3) == rgb.shape - assert [10280, 11565, 16705] == list(rgb[0, 0, :]) - assert [10280, 11565, 16705] == list(rgb[0, 320, :]) - assert [10280, 11565, 16705] == list(rgb[0, 639, :]) - assert [0, 0, 0] == list(rgb[240, 0, :]) - assert [257, 257, 257] == list(rgb[240, 320, :]) - assert [2313, 2313, 2313] == list(rgb[240, 639, :]) - assert [10280, 11565, 16705] == list(rgb[479, 0, :]) - assert [10280, 11565, 16705] == list(rgb[479, 320, :]) - assert [10280, 11565, 16705] == list(rgb[479, 639, :]) - - assert (orig == arr).all() - - # No endianness raises - ds._read_little = None - msg = ( - "Unable to determine the endianness of the dataset, please set " - "an appropriate Transfer Syntax UID in 'FileDataset.file_meta'" - ) - with pytest.raises(AttributeError, match=msg): - apply_color_lut(arr, ds) - - def test_uint16_16_segmented_big(self): - """Test big endian uint16 Pixel Data with 16-bit LUT entries.""" - ds = dcmread(PAL_SEG_BE_16_1F) - assert 16 == ds.BitsStored - assert 16 == ds.RedPaletteColorLookupTableDescriptor[2] - arr = ds.pixel_array - rgb = apply_color_lut(arr, ds) - assert (480, 640, 3) == rgb.shape - assert [10280, 11565, 16705] == list(rgb[0, 0, :]) - assert [10280, 11565, 16705] == list(rgb[0, 320, :]) - assert [10280, 11565, 16705] == list(rgb[0, 639, :]) - assert [0, 0, 0] == list(rgb[240, 0, :]) - assert [257, 257, 257] == list(rgb[240, 320, :]) - assert [2313, 2313, 2313] == list(rgb[240, 639, :]) - assert [10280, 11565, 16705] == list(rgb[479, 0, :]) - assert [10280, 11565, 16705] == list(rgb[479, 320, :]) - assert [10280, 11565, 16705] == list(rgb[479, 639, :]) - - def test_16_allocated_8_entries(self): - """Test LUT with 8-bit entries in 16 bits allocated.""" - ds = dcmread(PAL_08_200_0_16_1F, force=True) - ds.file_meta = FileMetaDataset() - ds.file_meta.TransferSyntaxUID = ImplicitVRLittleEndian - ds.RedPaletteColorLookupTableDescriptor = [200, 0, 8] - lut = pack("<200H", *list(range(0, 200))) - assert 400 == len(lut) - ds.RedPaletteColorLookupTableData = lut - ds.GreenPaletteColorLookupTableData = lut - ds.BluePaletteColorLookupTableData = lut - arr = ds.pixel_array - assert (56, 149) == (arr.min(), arr.max()) - out = apply_color_lut(arr, ds) - # Because the LUTs are mapped index to value (i.e. 
LUT[0] = 0, - # LUT[149] = 149), the output array should equal the input array - # but with three channels of identical values - assert np.array_equal(arr, out[:, :, 0]) - assert np.array_equal(arr, out[:, :, 1]) - assert np.array_equal(arr, out[:, :, 2]) - - def test_alpha(self): - """Test applying a color palette with an alpha channel.""" - ds = dcmread(PAL_08_256_0_16_1F) - ds.AlphaPaletteColorLookupTableData = b"\x00\x80" * 256 - arr = ds.pixel_array - rgba = apply_color_lut(arr, ds) - assert (600, 800, 4) == rgba.shape - assert 32768 == rgba[:, :, 3][0, 0] - assert (32768 == rgba[:, :, 3]).any() - - def test_well_known_palette(self, disable_value_validation): - """Test using a well-known palette.""" - ds = dcmread(PAL_08_256_0_16_1F) - # Drop it to 8-bit - arr = ds.pixel_array - rgb = apply_color_lut(arr, palette="PET") - line = rgb[68:88, 364, :] - ref = [ - [249, 122, 12], - [255, 130, 4], - [255, 136, 16], - [255, 134, 12], - [253, 126, 4], - [239, 112, 32], - [211, 84, 88], - [197, 70, 116], - [177, 50, 156], - [168, 40, 176], - [173, 46, 164], - [185, 58, 140], - [207, 80, 96], - [209, 82, 92], - [189, 62, 132], - [173, 46, 164], - [168, 40, 176], - [162, 34, 188], - [162, 34, 188], - [154, 26, 204], - ] - assert np.array_equal(np.asarray(ref), line) - uid = apply_color_lut(arr, palette="1.2.840.10008.1.5.2") - assert np.array_equal(uid, rgb) - - def test_first_map_positive(self): - """Test a positive first mapping value.""" - ds = dcmread(PAL_08_200_0_16_1F, force=True) - ds.file_meta = FileMetaDataset() - ds.file_meta.TransferSyntaxUID = ImplicitVRLittleEndian - ds.RedPaletteColorLookupTableDescriptor[1] = 10 - arr = ds.pixel_array - rgb = apply_color_lut(arr, ds) - # All IVs < 10 should be set to LUT[0] - # All IVs >= 10 should be shifted down 10 entries - # Original IV range is 56 to 149 -> 46 to 139 - # LUT[88] -> LUT[78] = [33280, 56320, 65280] - # LUT[149] -> LUT[139] = [50944, 16384, 27904] - assert [33280, 56320, 65280] == list(rgb[arr == 88][0]) - assert ([33280, 56320, 65280] == rgb[arr == 88]).all() - assert [50944, 16384, 27904] == list(rgb[arr == 149][0]) - assert ([50944, 16384, 27904] == rgb[arr == 149]).all() - - def test_first_map_negative(self): - """Test a negative first mapping value.""" - ds = dcmread(PAL_08_200_0_16_1F, force=True) - ds.file_meta = FileMetaDataset() - ds.file_meta.TransferSyntaxUID = ImplicitVRLittleEndian - ds["RedPaletteColorLookupTableDescriptor"].VR = "SS" - ds.RedPaletteColorLookupTableDescriptor[1] = -10 - arr = ds.pixel_array - rgb = apply_color_lut(arr, ds) - # All IVs < -10 should be set to LUT[0] - # All IVs >= -10 should be shifted up 10 entries - # Original IV range is 56 to 149 -> 66 to 159 - # LUT[60] -> LUT[70] = [33280 61952 65280] - # LUT[130] -> LUT[140] = [60160, 25600, 37376] - assert [33280, 61952, 65280] == list(rgb[arr == 60][0]) - assert ([33280, 61952, 65280] == rgb[arr == 60]).all() - assert [60160, 25600, 37376] == list(rgb[arr == 130][0]) - assert ([60160, 25600, 37376] == rgb[arr == 130]).all() - - def test_unchanged(self): - """Test dataset with no LUT is unchanged.""" - # Regression test for #1068 - ds = dcmread(MOD_16, force=True) - assert "RedPaletteColorLookupTableDescriptor" not in ds - msg = r"No suitable Palette Color Lookup Table Module found" - with pytest.raises(ValueError, match=msg): - apply_color_lut(ds.pixel_array, ds) - - -@pytest.mark.skipif(not HAVE_NP, reason="Numpy is not available") -class TestNumpy_ExpandSegmentedLUT: - """Tests for util._expand_segmented_lut().""" - - def 
test_discrete(self): - """Test expanding a discrete segment.""" - data = (0, 1, 0) - assert [0] == _expand_segmented_lut(data, "H") - - data = (0, 2, 0, 112) - assert [0, 112] == _expand_segmented_lut(data, "H") - - data = (0, 2, 0, -112) - assert [0, -112] == _expand_segmented_lut(data, "H") - - data = (0, 2, 0, 112, 0, 0) - assert [0, 112] == _expand_segmented_lut(data, "H") - - data = (0, 2, 0, -112, 0, 0) - assert [0, -112] == _expand_segmented_lut(data, "H") - - def test_linear(self): - """Test expanding a linear segment.""" - # Linear can never be the first segment - # Positive slope - data = (0, 2, 0, 28672, 1, 5, 49152) - out = _expand_segmented_lut(data, "H") - assert [0, 28672, 32768, 36864, 40960, 45056, 49152] == out - - data = (0, 1, -400, 1, 5, 0) - out = _expand_segmented_lut(data, "H") - assert [-400, -320, -240, -160, -80, 0] == out - - # Positive slope, floating point steps - data = (0, 1, 163, 1, 48, 255) - out = _expand_segmented_lut(data, "H") - assert (1 + 48) == len(out) - - # No slope - data = (0, 2, 0, 28672, 1, 5, 28672) - out = _expand_segmented_lut(data, "H") - assert [0, 28672, 28672, 28672, 28672, 28672, 28672] == out - - data = (0, 1, -100, 1, 5, -100) - out = _expand_segmented_lut(data, "H") - assert [-100, -100, -100, -100, -100, -100] == out - - # Negative slope - data = (0, 2, 0, 49152, 1, 5, 28672) - out = _expand_segmented_lut(data, "H") - assert [0, 49152, 45056, 40960, 36864, 32768, 28672] == out - - data = (0, 1, 0, 1, 5, -400) - out = _expand_segmented_lut(data, "H") - assert [0, -80, -160, -240, -320, -400] == out - - def test_indirect_08(self): - """Test expanding an indirect segment encoded as 8-bit.""" - # No real world test data available for this - # LSB, MSB - ref_a = [0, 112, 128, 144, 160, 176, 192, 192, 192, 192, 192, 192] - - # Little endian - data = (0, 2, 0, 112, 1, 5, 192, 2, 1, 4, 0, 0, 0) - out = _expand_segmented_lut(data, "B") - assert ref_a == out - - data = (0, 2, 0, 112, 2, 1, 0, 0, 0, 0) - out = _expand_segmented_lut(data, ">B") - assert [0, 112, 0, 112] == out - - # 0x0001 0x0203 is 66051 in BE 16-bit MSB, LSB - data = [0, 1, 0] * 22017 + [0, 2, 1, 2] + [2, 1, 2, 3, 0, 1] - out = _expand_segmented_lut(data, ">B") - assert [0] * 22017 + [1, 2, 1, 2] == out - - def test_indirect_16(self): - """Test expanding an indirect segment encoded as 16-bit.""" - # Start from a discrete segment - data = (0, 2, 0, 112, 1, 5, 192, 2, 2, 0, 0) - out = _expand_segmented_lut(data, "H") - assert [0, 112, 128, 144, 160, 176, 192] * 2 == out - - # Start from a linear segment - data = (0, 2, 0, 112, 1, 5, 192, 2, 1, 4, 0) - out = _expand_segmented_lut(data, "H") - assert [0, 112, 128, 144, 160, 176, 192, 192, 192, 192, 192, 192] == out - - def test_palettes_spring(self): - """Test expanding the SPRING palette.""" - ds = dcmread(get_palette_files("spring.dcm")[0]) - - bs = ds.SegmentedRedPaletteColorLookupTableData - fmt = f"<{len(bs)}B" - data = unpack(fmt, bs) - out = _expand_segmented_lut(data, fmt) - assert [255] * 256 == out - - bs = ds.SegmentedGreenPaletteColorLookupTableData - fmt = f"<{len(bs)}B" - data = unpack(fmt, bs) - out = _expand_segmented_lut(data, fmt) - assert list(range(0, 256)) == out - - bs = ds.SegmentedBluePaletteColorLookupTableData - fmt = f"<{len(bs)}B" - data = unpack(fmt, bs) - out = _expand_segmented_lut(data, fmt) - assert list(range(255, -1, -1)) == out - - def test_palettes_summer(self): - """Test expanding the SUMMER palette.""" - ds = dcmread(get_palette_files("summer.dcm")[0]) - - bs = 
-        fmt = f"<{len(bs)}B"
-        data = unpack(fmt, bs)
-        out = _expand_segmented_lut(data, fmt)
-        assert [0] * 256 == out
-
-        bs = ds.SegmentedGreenPaletteColorLookupTableData
-        fmt = f"<{len(bs)}B"
-        data = unpack(fmt, bs)
-        out = _expand_segmented_lut(data, fmt)
-        assert [255, 255, 254, 254, 253] == out[:5]
-        assert [130, 129, 129, 128, 128] == out[-5:]
-
-        bs = ds.SegmentedBluePaletteColorLookupTableData
-        fmt = f"<{len(bs)}B"
-        data = unpack(fmt, bs)
-        out = _expand_segmented_lut(data, fmt)
-        assert [0] * 128 == out[:128]
-        assert [246, 248, 250, 252, 254] == out[-5:]
-
-    def test_palettes_fall(self):
-        """Test expanding the FALL palette."""
-        ds = dcmread(get_palette_files("fall.dcm")[0])
-
-        bs = ds.SegmentedRedPaletteColorLookupTableData
-        fmt = f"<{len(bs)}B"
-        data = unpack(fmt, bs)
-        out = _expand_segmented_lut(data, fmt)
-        assert [255] * 256 == out
-
-        bs = ds.SegmentedGreenPaletteColorLookupTableData
-        fmt = f"<{len(bs)}B"
-        data = unpack(fmt, bs)
-        out = _expand_segmented_lut(data, fmt)
-        assert list(range(255, -1, -1)) == out
-
-        bs = ds.SegmentedBluePaletteColorLookupTableData
-        fmt = f"<{len(bs)}B"
-        data = unpack(fmt, bs)
-        out = _expand_segmented_lut(data, fmt)
-        assert [0] * 256 == out
-
-    def test_palettes_winter(self):
-        """Test expanding the WINTER palette."""
-        ds = dcmread(get_palette_files("winter.dcm")[0])
-
-        bs = ds.SegmentedRedPaletteColorLookupTableData
-        fmt = f"<{len(bs)}B"
-        data = unpack(fmt, bs)
-        out = _expand_segmented_lut(data, fmt)
-        assert [0] * 128 == out[:128]
-        assert [123, 124, 125, 126, 127] == out[-5:]
-
-        bs = ds.SegmentedGreenPaletteColorLookupTableData
-        fmt = f"<{len(bs)}B"
-        data = unpack(fmt, bs)
-        out = _expand_segmented_lut(data, fmt)
-        assert list(range(0, 256)) == out
-
-        bs = ds.SegmentedBluePaletteColorLookupTableData
-        fmt = f"<{len(bs)}B"
-        data = unpack(fmt, bs)
-        out = _expand_segmented_lut(data, fmt)
-        assert [255, 255, 254, 254, 253] == out[:5]
-        assert [130, 129, 129, 128, 128] == out[-5:]
-
-    def test_first_linear_raises(self):
-        """Test having a linear segment first raises exception."""
-        data = (1, 5, 49152)
-        msg = (
-            r"Error expanding a segmented palette color lookup table: "
-            r"the first segment cannot be a linear segment"
-        )
-        with pytest.raises(ValueError, match=msg):
-            _expand_segmented_lut(data, "H")
-
-    def test_first_indirect_raises(self):
-        """Test having an indirect segment first raises exception."""
-        data = (2, 5, 2, 0)
-        msg = (
-            r"Error expanding a segmented palette color lookup table: "
-            r"the first segment cannot be an indirect segment"
-        )
-        with pytest.raises(ValueError, match=msg):
-            _expand_segmented_lut(data, "H")
-
-    def test_unknown_opcode_raises(self):
-        """Test having an unknown opcode raises exception."""
-        data = (3, 5, 49152)
-        msg = (
-            r"Error expanding a segmented palette lookup table: "
-            r"unknown segment type '3'"
-        )
-        with pytest.raises(ValueError, match=msg):
-            _expand_segmented_lut(data, "H")
-
-
-@pytest.mark.skipif(not HAVE_NP, reason="Numpy is not available")
-class TestNumpy_ApplyWindowing:
-    """Tests for util.apply_windowing()."""
-
-    def test_window_single_view(self):
-        """Test windowing with a single view."""
-        # 12-bit unsigned
-        ds = dcmread(WIN_12_1F)
-        assert 16 == ds.BitsAllocated
-        assert 12 == ds.BitsStored
-        assert 0 == ds.PixelRepresentation
-        ds.WindowCenter = 450
-        ds.WindowWidth = 790
-        assert 450 == ds.WindowCenter
-        assert 790 == ds.WindowWidth
-
-        arr = ds.pixel_array
-        assert 642 == arr[326, 130]
-        out = apply_windowing(arr, ds)
-        assert 3046.6 == pytest.approx(out[326, 130], abs=0.1)
-
-    def test_window_multi_view(self):
-        """Test windowing with multiple views."""
-        ds = dcmread(WIN_12_1F)
-        assert 16 == ds.BitsAllocated
-        assert 12 == ds.BitsStored
-        assert 0 == ds.PixelRepresentation
-        if HAVE_NP and config.use_DS_numpy:
-            expected = np.array([450, 200])
-            assert np.allclose(ds.WindowCenter, expected)
-            expected = np.array([790, 443])
-            assert np.allclose(ds.WindowWidth, expected)
-        else:
-            assert [450, 200] == ds.WindowCenter
-            assert [790, 443] == ds.WindowWidth
-
-        arr = ds.pixel_array
-        assert 642 == arr[326, 130]
-        out = apply_windowing(arr, ds)
-        assert 3046.6 == pytest.approx(out[326, 130], abs=0.1)
-        out = apply_windowing(arr, ds, index=1)
-        assert 4095.0 == pytest.approx(out[326, 130], abs=0.1)
-
-    def test_window_uint8(self):
-        """Test windowing an 8-bit unsigned array."""
-        ds = Dataset()
-        ds.PhotometricInterpretation = "MONOCHROME1"
-        ds.PixelRepresentation = 0
-        ds.BitsStored = 8
-        arr = np.asarray([0, 1, 128, 254, 255], dtype="uint8")
-
-        # Linear
-        ds.WindowWidth = 1
-        ds.WindowCenter = 0
-        assert [255, 255, 255, 255, 255] == apply_windowing(arr, ds).tolist()
-
-        ds.WindowWidth = 128
-        ds.WindowCenter = 254
-        assert [0, 0, 0, 128.5, 130.5] == pytest.approx(
-            apply_windowing(arr, ds).tolist(), abs=0.1
-        )
-
-        # Linear exact
-        ds.VOILUTFunction = "LINEAR_EXACT"
-        assert [0, 0, 0, 127.5, 129.5] == pytest.approx(
-            apply_windowing(arr, ds).tolist(), abs=0.1
-        )
-
-        # Sigmoid
-        ds.VOILUTFunction = "SIGMOID"
-        assert [0.1, 0.1, 4.9, 127.5, 129.5] == pytest.approx(
-            apply_windowing(arr, ds).tolist(), abs=0.1
-        )
-
-    def test_window_uint16(self):
-        """Test windowing a 16-bit unsigned array."""
-        ds = Dataset()
-        ds.PhotometricInterpretation = "MONOCHROME1"
-        ds.PixelRepresentation = 0
-        ds.BitsStored = 16
-        arr = np.asarray([0, 1, 32768, 65534, 65535], dtype="uint16")
-
-        ds.WindowWidth = 1
-        ds.WindowCenter = 0
-        assert [65535] * 5 == apply_windowing(arr, ds).tolist()
-
-        ds.WindowWidth = 32768
-        ds.WindowCenter = 254
-        assert [32260.5, 32262.5, 65535, 65535, 65535] == pytest.approx(
-            apply_windowing(arr, ds).tolist(), abs=0.1
-        )
-
-        ds.VOILUTFunction = "LINEAR_EXACT"
-        assert [32259.5, 32261.5, 65535, 65535, 65535] == pytest.approx(
-            apply_windowing(arr, ds).tolist(), abs=0.1
-        )
-
-        ds.VOILUTFunction = "SIGMOID"
-        assert [32259.5, 32261.5, 64319.8, 65512.3, 65512.3] == pytest.approx(
-            apply_windowing(arr, ds).tolist(), abs=0.1
-        )
-
-    def test_window_uint32(self):
-        """Test windowing a 32-bit unsigned array."""
-        ds = Dataset()
-        ds.PhotometricInterpretation = "MONOCHROME1"
-        ds.PixelRepresentation = 0
-        ds.BitsStored = 32
-        y_max = 2**32 - 1
-        arr = np.asarray([0, 1, 2**31, y_max - 1, y_max], dtype="uint32")
-
-        ds.WindowWidth = 1
-        ds.WindowCenter = 0
-        assert [y_max] * 5 == apply_windowing(arr, ds).tolist()
-
-        ds.WindowWidth = 342423423423
-        ds.WindowCenter = 757336
-        assert [
-            2147474148.4,
-            2147474148.4,
-            2174409724,
-            2201345299.7,
-            2201345299.7,
-        ] == pytest.approx(apply_windowing(arr, ds).tolist(), abs=0.1)
-
-        ds.VOILUTFunction = "LINEAR_EXACT"
-        assert [
-            2147474148.3,
-            2147474148.4,
-            2174409724,
-            2201345299.7,
-            2201345299.7,
-        ] == pytest.approx(apply_windowing(arr, ds).tolist(), abs=0.1)
-
-        ds.VOILUTFunction = "SIGMOID"
-        assert [
-            2147474148.3,
-            2147474148.4,
-            2174408313.1,
-            2201334008.2,
-            2201334008.3,
-        ] == pytest.approx(apply_windowing(arr, ds).tolist(), abs=0.1)
-
-    def test_window_int8(self):
-        """Test windowing an 8-bit signed array."""
-        ds = Dataset()
-        ds.PhotometricInterpretation = "MONOCHROME1"
-        ds.PixelRepresentation = 1
-        ds.BitsStored = 8
-        arr = np.asarray([-128, -127, -1, 0, 1, 126, 127], dtype="int8")
-
-        # Linear
-        ds.WindowWidth = 1
-        ds.WindowCenter = 0
-        assert [-128, -128, -128, 127, 127, 127, 127] == pytest.approx(
-            apply_windowing(arr, ds).tolist()
-        )
-
-        ds.WindowWidth = 128
-        ds.WindowCenter = -5
-        assert [-128, -128, 8.5, 10.5, 12.6, 127, 127] == pytest.approx(
-            apply_windowing(arr, ds).tolist(), abs=0.1
-        )
-
-        # Linear exact
-        ds.VOILUTFunction = "LINEAR_EXACT"
-        assert [-128, -128, 7.5, 9.5, 11.5, 127, 127] == pytest.approx(
-            apply_windowing(arr, ds).tolist(), abs=0.1
-        )
-
-        # Sigmoid
-        ds.VOILUTFunction = "SIGMOID"
-        assert [-122.7, -122.5, 7.5, 9.4, 11.4, 122.8, 122.9] == pytest.approx(
-            apply_windowing(arr, ds).tolist(), abs=0.1
-        )
-
-    def test_window_int16(self):
-        """Test windowing a 16-bit signed array."""
-        ds = Dataset()
-        ds.PhotometricInterpretation = "MONOCHROME1"
-        ds.PixelRepresentation = 1
-        ds.BitsStored = 16
-        arr = np.asarray([-128, -127, -1, 0, 1, 126, 127], dtype="int16")
-
-        # Linear
-        ds.WindowWidth = 1
-        ds.WindowCenter = 0
-        assert [-32768, -32768, -32768, 32767, 32767, 32767, 32767] == pytest.approx(
-            apply_windowing(arr, ds).tolist(), abs=0.1
-        )
-
-        ds.WindowWidth = 128
-        ds.WindowCenter = -5
-        assert [-32768, -32768, 2321.6, 2837.6, 3353.7, 32767, 32767] == pytest.approx(
-            apply_windowing(arr, ds).tolist(), abs=0.1
-        )
-
-        # Linear exact
-        ds.VOILUTFunction = "LINEAR_EXACT"
-        assert [-32768, -32768, 2047.5, 2559.5, 3071.5, 32767, 32767] == pytest.approx(
-            apply_windowing(arr, ds).tolist(), abs=0.1
-        )
-
-        # Sigmoid
-        ds.VOILUTFunction = "SIGMOID"
-        assert [
-            -31394.1,
-            -31351.4,
-            2044.8,
-            2554.3,
-            3062.5,
-            31692,
-            31724.6,
-        ] == pytest.approx(apply_windowing(arr, ds).tolist(), abs=0.1)
-
-    def test_window_int32(self):
-        """Test windowing a 32-bit signed array."""
-        ds = Dataset()
-        ds.PhotometricInterpretation = "MONOCHROME1"
-        ds.PixelRepresentation = 1
-        ds.BitsStored = 32
-        arr = np.asarray([-128, -127, -1, 0, 1, 126, 127], dtype="int32")
-
-        # Linear
-        ds.WindowWidth = 1
-        ds.WindowCenter = 0
-        assert [
-            -(2**31),
-            -(2**31),
-            -(2**31),
-            2**31 - 1,
-            2**31 - 1,
-            2**31 - 1,
-            2**31 - 1,
-        ] == pytest.approx(apply_windowing(arr, ds).tolist(), abs=0.1)
-
-        ds.WindowWidth = 128
-        ds.WindowCenter = -5
-        assert [
-            -2147483648,
-            -2147483648,
-            152183880,
-            186002520.1,
-            219821160.3,
-            2147483647,
-            2147483647,
-        ] == pytest.approx(apply_windowing(arr, ds).tolist(), abs=0.1)
-
-        # Linear exact
-        ds.VOILUTFunction = "LINEAR_EXACT"
-        assert [
-            -2147483648,
-            -2147483648,
-            134217727.5,
-            167772159.5,
-            201326591.5,
-            2147483647,
-            2147483647,
-        ] == pytest.approx(apply_windowing(arr, ds).tolist(), abs=0.1)
-
-        # Sigmoid
-        ds.VOILUTFunction = "SIGMOID"
-        assert [
-            -2057442919.3,
-            -2054646500.7,
-            134043237.4,
-            167431657.4,
-            200738833.7,
-            2077033158.8,
-            2079166214.8,
-        ] == pytest.approx(apply_windowing(arr, ds).tolist(), abs=0.1)
-
-    def test_window_multi_frame(self):
-        """Test windowing with multiple frames."""
-        ds = dcmread(WIN_12_1F)
-        assert 16 == ds.BitsAllocated
-        assert 12 == ds.BitsStored
-        assert 0 == ds.PixelRepresentation
-        ds.WindowCenter = 450
-        ds.WindowWidth = 790
-        assert 450 == ds.WindowCenter
-        assert 790 == ds.WindowWidth
-
-        arr = ds.pixel_array
-        arr = np.stack([arr, 4095 - arr])
-        assert (2, 484, 484) == arr.shape
-        assert 642 == arr[0, 326, 130]
-        assert 3453 == arr[1, 326, 130]
-        out = apply_windowing(arr, ds)
-        assert 3046.6 == pytest.approx(out[0, 326, 130], abs=0.1)
-        assert 4095.0 == pytest.approx(out[1, 326, 130], abs=0.1)
-
-    def test_window_rescale(self):
-        """Test windowing after a rescale operation."""
-        ds = dcmread(WIN_12_1F)
-        assert 16 == ds.BitsAllocated
-        assert 12 == ds.BitsStored
-        assert 0 == ds.PixelRepresentation
-        if HAVE_NP and config.use_DS_numpy:
-            expected = np.array([450, 200])
-            assert np.allclose(ds.WindowCenter, expected)
-            expected = np.array([790, 443])
-            assert np.allclose(ds.WindowWidth, expected)
-        else:
-            assert [450, 200] == ds.WindowCenter
-            assert [790, 443] == ds.WindowWidth
-        ds.RescaleSlope = 1.2
-        ds.RescaleIntercept = 0
-
-        arr = ds.pixel_array
-        assert 0 == arr[16, 60]
-        assert 642 == arr[326, 130]
-        assert 1123 == arr[316, 481]
-        hu = apply_modality_lut(arr, ds)
-        assert 0 == hu[16, 60]
-        assert 770.4 == hu[326, 130]
-        assert 1347.6 == hu[316, 481]
-        # With rescale -> output range is 0 to 4914
-        out = apply_windowing(hu, ds)
-        assert 0 == pytest.approx(out[16, 60], abs=0.1)
-        assert 4455.6 == pytest.approx(out[326, 130], abs=0.1)
-        assert 4914.0 == pytest.approx(out[316, 481], abs=0.1)
-
-    def test_window_modality_lut(self):
-        """Test windowing after a modality LUT operation."""
-        ds = dcmread(MOD_16_SEQ)
-        ds.WindowCenter = [49147, 200]
-        ds.WindowWidth = [790, 443]
-        assert 16 == ds.BitsAllocated
-        assert 12 == ds.BitsStored
-        assert 1 == ds.PixelRepresentation  # Signed
-        assert "RescaleSlope" not in ds
-        assert "ModalityLUTSequence" in ds
-
-        seq = ds.ModalityLUTSequence[0]
-        assert [4096, -2048, 16] == seq.LUTDescriptor
-        arr = ds.pixel_array
-        assert -2048 == arr.min()
-        assert 4095 == arr.max()
-
-        arr = ds.pixel_array
-        assert 2047 == arr[16, 60]
-        assert 1023 == arr[0, 1]
-        hu = apply_modality_lut(arr, ds)
-        assert 65535 == hu[16, 60]
-        assert 49147 == hu[0, 1]
-        out = apply_windowing(hu, ds)
-        assert 65535.0 == pytest.approx(out[16, 60], abs=0.1)
-        assert 32809.0 == pytest.approx(out[0, 1], abs=0.1)
-        # Output range must be 0 to 2**16 - 1
-        assert 65535 == out.max()
-        assert 0 == out.min()
-
-    def test_window_bad_photometric_interp(self):
-        """Test bad photometric interpretation raises exception."""
-        ds = dcmread(WIN_12_1F)
-        ds.PhotometricInterpretation = "RGB"
-        msg = r"only 'MONOCHROME1' and 'MONOCHROME2' are allowed"
-        with pytest.raises(ValueError, match=msg):
-            apply_windowing(ds.pixel_array, ds)
-
-    def test_window_bad_parameters(self):
-        """Test bad windowing parameters raise exceptions."""
-        ds = dcmread(WIN_12_1F)
-        ds.WindowWidth = 0
-        ds.VOILUTFunction = "LINEAR"
-        msg = r"Width must be greater than or equal to 1"
-        with pytest.raises(ValueError, match=msg):
-            apply_windowing(ds.pixel_array, ds)
-
-        ds.VOILUTFunction = "LINEAR_EXACT"
-        msg = r"Width must be greater than 0"
-        with pytest.raises(ValueError, match=msg):
-            apply_windowing(ds.pixel_array, ds)
-
-        ds.VOILUTFunction = "SIGMOID"
-        msg = r"Width must be greater than 0"
-        with pytest.raises(ValueError, match=msg):
-            apply_windowing(ds.pixel_array, ds)
-
-        ds.VOILUTFunction = "UNKNOWN"
-        msg = r"Unsupported \(0028,1056\) VOI LUT Function value 'UNKNOWN'"
-        with pytest.raises(ValueError, match=msg):
-            apply_windowing(ds.pixel_array, ds)
-
-    def test_window_bad_index(self, no_numpy_use):
-        """Test windowing with a bad view index."""
-        ds = dcmread(WIN_12_1F)
-        assert 2 == len(ds.WindowWidth)
-        arr = ds.pixel_array
-        with pytest.raises(IndexError, match=r"list index out of range"):
-            apply_windowing(arr, ds, index=2)
-
-    def test_unchanged(self):
-        """Test input array is unchanged if no VOI LUT"""
LUT""" - ds = Dataset() - ds.PhotometricInterpretation = "MONOCHROME1" - ds.PixelRepresentation = 1 - ds.BitsStored = 8 - arr = np.asarray([-128, -127, -1, 0, 1, 126, 127], dtype="int8") - out = apply_windowing(arr, ds) - assert [-128, -127, -1, 0, 1, 126, 127] == out.tolist() - - ds.ModalityLUTSequence = [] - out = apply_windowing(arr, ds) - assert [-128, -127, -1, 0, 1, 126, 127] == out.tolist() - - def test_rescale_empty(self): - """Test RescaleSlope and RescaleIntercept being empty.""" - ds = dcmread(WIN_12_1F) - ds.RescaleSlope = None - ds.RescaleIntercept = None - - arr = ds.pixel_array - assert 0 == arr[16, 60] - assert 642 == arr[326, 130] - assert 1123 == arr[316, 481] - out = apply_windowing(arr, ds) - assert 0 == pytest.approx(out[16, 60], abs=0.1) - assert 3046.6 == pytest.approx(out[326, 130], abs=0.1) - assert 4095.0 == pytest.approx(out[316, 481], abs=0.1) - - -@pytest.mark.skipif(not HAVE_NP, reason="Numpy is not available") -class TestNumpy_ApplyVOI: - """Tests for util.apply_voi().""" - - def test_voi_single_view(self): - """Test VOI LUT with a single view.""" - ds = dcmread(VOI_08_1F) - assert 8 == ds.BitsAllocated - assert 8 == ds.BitsStored - assert 0 == ds.PixelRepresentation - item = ds.VOILUTSequence[0] - assert [256, 0, 16] == item.LUTDescriptor - lut = item.LUTData - assert 0 == lut[0] - assert 19532 == lut[76] - assert 45746 == lut[178] - assert 65535 == lut[255] - - arr = ds.pixel_array - assert 0 == arr[387, 448] - assert 76 == arr[178, 126] - assert 178 == arr[186, 389] - assert 255 == arr[129, 79] - - out = apply_voi(arr, ds) - assert 0 == out[387, 448] - assert 19532 == out[178, 126] - assert 45746 == out[186, 389] - assert 65535 == out[129, 79] - - def test_voi_multi_view(self): - """Test VOI LUT with multiple views.""" - ds = dcmread(VOI_08_1F) - assert 8 == ds.BitsAllocated - assert 8 == ds.BitsStored - assert 0 == ds.PixelRepresentation - item0 = ds.VOILUTSequence[0] - # Add another view that's the inverse - ds.VOILUTSequence.append(Dataset()) - item1 = ds.VOILUTSequence[1] - item1.LUTDescriptor = [256, 0, 16] - item1.LUTData = item0.LUTData[::-1] - - arr = ds.pixel_array - assert 0 == arr[387, 448] - assert 76 == arr[178, 126] - assert 178 == arr[186, 389] - assert 255 == arr[129, 79] - - out0 = apply_voi(arr, ds) - assert 0 == out0[387, 448] - assert 19532 == out0[178, 126] - assert 45746 == out0[186, 389] - assert 65535 == out0[129, 79] - - out1 = apply_voi(arr, ds, index=1) - assert 65535 == out1[387, 448] - assert 46003 == out1[178, 126] - assert 19789 == out1[186, 389] - assert 0 == out1[129, 79] - - def test_voi_multi_frame(self): - """Test VOI with a multiple frames.""" - ds = dcmread(VOI_08_1F) - assert 8 == ds.BitsAllocated - assert 8 == ds.BitsStored - assert 0 == ds.PixelRepresentation - - arr = ds.pixel_array - arr = np.stack([arr, 255 - arr]) - assert (2, 512, 512) == arr.shape - - out = apply_voi(arr, ds) - assert 0 == out[0, 387, 448] - assert 19532 == out[0, 178, 126] - assert 45746 == out[0, 186, 389] - assert 65535 == out[0, 129, 79] - assert 65535 == out[1, 387, 448] - assert 46003 == out[1, 178, 126] - assert 19789 == out[1, 186, 389] - assert 0 == out[1, 129, 79] - - def test_voi_zero_entries(self): - """Test that 0 entries is interpreted correctly.""" - ds = dcmread(VOI_08_1F) - seq = ds.VOILUTSequence[0] - seq.LUTDescriptor = [0, 0, 16] - assert 256 == len(seq.LUTData) - arr = np.asarray([0, 255, 256, 65535]) - msg = r"index 256 is out of bounds" - with pytest.raises(IndexError, match=msg): - apply_voi(arr, ds) - - # LUTData with 
-        seq.LUTData = [0] * 65535 + [1]
-        out = apply_voi(arr, ds)
-        assert [0, 0, 0, 1] == list(out)
-
-    def test_voi_uint8(self):
-        """Test uint VOI LUT with an 8-bit LUT."""
-        ds = Dataset()
-        ds.PixelRepresentation = 0
-        ds.BitsStored = 8
-        ds.VOILUTSequence = [Dataset()]
-        item = ds.VOILUTSequence[0]
-        item.LUTDescriptor = [4, 0, 8]
-        item.LUTData = [0, 127, 128, 255]
-        arr = np.asarray([0, 1, 128, 254, 255], dtype="uint8")
-        out = apply_voi(arr, ds)
-        assert "uint8" == out.dtype
-        assert [0, 127, 255, 255, 255] == out.tolist()
-
-    def test_voi_uint16(self):
-        """Test uint VOI LUT with a 16-bit LUT."""
-        ds = Dataset()
-        ds.PixelRepresentation = 0
-        ds.BitsStored = 16
-        ds.VOILUTSequence = [Dataset()]
-        item = ds.VOILUTSequence[0]
-        item.LUTDescriptor = [4, 0, 16]
-        item.LUTData = [0, 127, 32768, 65535]
-        arr = np.asarray([0, 1, 2, 3, 255], dtype="uint16")
-        out = apply_voi(arr, ds)
-        assert "uint16" == out.dtype
-        assert [0, 127, 32768, 65535, 65535] == out.tolist()
-
-    def test_voi_int8(self):
-        """Test int VOI LUT with an 8-bit LUT."""
-        ds = Dataset()
-        ds.PixelRepresentation = 1
-        ds.BitsStored = 8
-        ds.VOILUTSequence = [Dataset()]
-        item = ds.VOILUTSequence[0]
-        item.LUTDescriptor = [4, 0, 8]
-        item.LUTData = [0, 127, 128, 255]
-        arr = np.asarray([0, -1, 2, -128, 127], dtype="int8")
-        out = apply_voi(arr, ds)
-        assert "uint8" == out.dtype
-        assert [0, 0, 128, 0, 255] == out.tolist()
-
-    def test_voi_int16(self):
-        """Test int VOI LUT with a 16-bit LUT."""
-        ds = Dataset()
-        ds.PixelRepresentation = 0
-        ds.BitsStored = 16
-        ds.VOILUTSequence = [Dataset()]
-        item = ds.VOILUTSequence[0]
-        item.LUTDescriptor = [4, 0, 16]
-        item.LUTData = [0, 127, 32768, 65535]
-        arr = np.asarray([0, -1, 2, -128, 255], dtype="int16")
-        out = apply_voi(arr, ds)
-        assert "uint16" == out.dtype
-        assert [0, 0, 32768, 0, 65535] == out.tolist()
-
-    def test_voi_bad_depth(self):
-        """Test bad LUT depth raises exception."""
-        ds = dcmread(VOI_08_1F)
-        item = ds.VOILUTSequence[0]
-        item.LUTDescriptor[2] = 7
-        msg = r"'7' bits per LUT entry is not supported"
-        with pytest.raises(NotImplementedError, match=msg):
-            apply_voi(ds.pixel_array, ds)
-
-        item.LUTDescriptor[2] = 17
-        msg = r"'17' bits per LUT entry is not supported"
-        with pytest.raises(NotImplementedError, match=msg):
-            apply_voi(ds.pixel_array, ds)
-
-    def test_voi_uint16_array_float(self):
-        """Test warning when array is float and VOI LUT with a 16-bit LUT"""
-        ds = Dataset()
-        ds.PixelRepresentation = 0
-        ds.BitsStored = 16
-        ds.VOILUTSequence = [Dataset()]
-        item = ds.VOILUTSequence[0]
-        item.LUTDescriptor = [4, 0, 16]
-        item.LUTData = [0, 127, 32768, 65535]
-        arr = np.asarray([0, 1, 2, 3, 255], dtype="float64")
-        msg = (
-            r"Applying a VOI LUT on a float input array may give " r"incorrect results"
-        )
-
-        with pytest.warns(UserWarning, match=msg):
-            out = apply_voi(arr, ds)
-        assert [0, 127, 32768, 65535, 65535] == out.tolist()
-
-    def test_unchanged(self):
-        """Test input array is unchanged if no VOI LUT"""
-        ds = Dataset()
-        ds.PhotometricInterpretation = "MONOCHROME1"
-        ds.PixelRepresentation = 1
-        ds.BitsStored = 8
-        arr = np.asarray([-128, -127, -1, 0, 1, 126, 127], dtype="int8")
-        out = apply_voi(arr, ds)
-        assert [-128, -127, -1, 0, 1, 126, 127] == out.tolist()
-
-        ds.VOILUTSequence = []
-        out = apply_voi(arr, ds)
-        assert [-128, -127, -1, 0, 1, 126, 127] == out.tolist()
-
-    def test_voi_lutdata_ow(self):
-        """Test LUT Data with VR OW."""
-        ds = Dataset()
-        ds.set_original_encoding(False, True)
-        ds.PixelRepresentation = 0
-        ds.BitsStored = 16
-        ds.VOILUTSequence = [Dataset()]
-        item = ds.VOILUTSequence[0]
-        item.LUTDescriptor = [4, 0, 16]
-        item.LUTData = [0, 127, 32768, 65535]
-        item.LUTData = pack("<4H", *item.LUTData)
-        item["LUTData"].VR = "OW"
-        arr = np.asarray([0, 1, 2, 3, 255], dtype="uint16")
-        out = apply_voi(arr, ds)
-        assert "uint16" == out.dtype
-        assert [0, 127, 32768, 65535, 65535] == out.tolist()
-
-    def test_file_meta(self):
-        """Test using file meta to determine endianness"""
-        ds = Dataset()
-        ds.file_meta = FileMetaDataset()
-        ds.file_meta.TransferSyntaxUID = ExplicitVRLittleEndian
-        ds.PixelRepresentation = 0
-        ds.BitsStored = 16
-        ds.VOILUTSequence = [Dataset()]
-        item = ds.VOILUTSequence[0]
-        item.LUTDescriptor = [4, 0, 16]
-        item.LUTData = [0, 127, 32768, 65535]
-        item.LUTData = pack("<4H", *item.LUTData)
-        item["LUTData"].VR = "OW"
-        arr = np.asarray([0, 1, 2, 3, 255], dtype="uint16")
-        out = apply_voi(arr, ds)
-        assert "uint16" == out.dtype
-        assert [0, 127, 32768, 65535, 65535] == out.tolist()
-
-    def test_no_endianness_raises(self):
-        """Test unable to determine endianness"""
-        ds = Dataset()
-        ds.PixelRepresentation = 0
-        ds.BitsStored = 16
-        ds.VOILUTSequence = [Dataset()]
-        item = ds.VOILUTSequence[0]
-        item.LUTDescriptor = [4, 0, 16]
-        item.LUTData = [0, 127, 32768, 65535]
-        item.LUTData = pack("<4H", *item.LUTData)
-        item["LUTData"].VR = "OW"
-        arr = np.asarray([0, 1, 2, 3, 255], dtype="uint16")
-        msg = (
-            "Unable to determine the endianness of the dataset, please set "
-            "an appropriate Transfer Syntax UID in 'Dataset.file_meta'"
-        )
-        with pytest.raises(AttributeError, match=msg):
-            apply_voi(arr, ds)
-
-
-@pytest.mark.skipif(not HAVE_NP, reason="Numpy is not available")
-class TestNumpy_ApplyVOILUT:
-    """Tests for util.apply_voi_lut()"""
-
-    def test_unchanged(self):
-        """Test input array is unchanged if no VOI LUT"""
-        ds = Dataset()
-        ds.PhotometricInterpretation = "MONOCHROME1"
-        ds.PixelRepresentation = 1
-        ds.BitsStored = 8
-        arr = np.asarray([-128, -127, -1, 0, 1, 126, 127], dtype="int8")
-        out = apply_voi_lut(arr, ds)
-        assert [-128, -127, -1, 0, 1, 126, 127] == out.tolist()
-
-        ds.VOILUTSequence = []
-        out = apply_voi_lut(arr, ds)
-        assert [-128, -127, -1, 0, 1, 126, 127] == out.tolist()
-
-    def test_only_windowing(self):
-        """Test only windowing operation elements present."""
-        ds = Dataset()
-        ds.PhotometricInterpretation = "MONOCHROME1"
-        ds.PixelRepresentation = 0
-        ds.BitsStored = 8
-        arr = np.asarray([0, 1, 128, 254, 255], dtype="uint8")
-
-        ds.WindowWidth = 1
-        ds.WindowCenter = 0
-        assert [255, 255, 255, 255, 255] == apply_voi_lut(arr, ds).tolist()
-
-    def test_only_voi(self):
-        """Test only LUT operation elements present."""
-        ds = Dataset()
-        ds.PixelRepresentation = 0
-        ds.BitsStored = 8
-        ds.VOILUTSequence = [Dataset()]
-        item = ds.VOILUTSequence[0]
-        item.LUTDescriptor = [4, 0, 8]
-        item.LUTData = [0, 127, 128, 255]
-        arr = np.asarray([0, 1, 128, 254, 255], dtype="uint8")
-        out = apply_voi_lut(arr, ds)
-        assert "uint8" == out.dtype
-        assert [0, 127, 255, 255, 255] == out.tolist()
-
-    def test_voi_windowing(self):
-        """Test both LUT and windowing operation elements present."""
-        ds = Dataset()
-        ds.PhotometricInterpretation = "MONOCHROME1"
-        ds.PixelRepresentation = 0
-        ds.BitsStored = 8
-        ds.WindowWidth = 1
-        ds.WindowCenter = 0
-        ds.VOILUTSequence = [Dataset()]
-        item = ds.VOILUTSequence[0]
-        item.LUTDescriptor = [4, 0, 8]
-        item.LUTData = [0, 127, 128, 255]
-        arr = np.asarray([0, 1, 128, 254, 255], dtype="uint8")
-
-        # Defaults to LUT
-        out = apply_voi_lut(arr, ds)
-        assert [0, 127, 255, 255, 255] == out.tolist()
-
-        out = apply_voi_lut(arr, ds, prefer_lut=False)
-        assert [255, 255, 255, 255, 255] == out.tolist()
-
-    def test_voi_windowing_empty(self):
-        """Test empty VOI elements."""
-        ds = Dataset()
-        ds.PhotometricInterpretation = "MONOCHROME1"
-        ds.PixelRepresentation = 0
-        ds.BitsStored = 8
-        ds.WindowWidth = 1
-        ds.WindowCenter = 0
-        ds.VOILUTSequence = [Dataset()]
-        item = ds.VOILUTSequence[0]
-        item.LUTDescriptor = [4, 0, 8]
-        item.LUTData = [0, 127, 128, 255]
-        arr = np.asarray([0, 1, 128, 254, 255], dtype="uint8")
-
-        # Test empty VOI elements
-        item.LUTData = None
-        out = apply_voi_lut(arr, ds)
-        assert [255, 255, 255, 255, 255] == out.tolist()
-
-        # Test empty windowing elements
-        ds.WindowWidth = None
-        out = apply_voi_lut(arr, ds)
-        assert [0, 1, 128, 254, 255] == out.tolist()
-
-
-class TestGetJ2KParameters:
-    """Tests for get_j2k_parameters."""
-
-    def test_precision(self):
-        """Test getting the precision for a JPEG2K bytestream."""
-        base = b"\xff\x4f\xff\x51" + b"\x00" * 38
-        # Signed
-        for ii in range(135, 144):
-            params = get_j2k_parameters(base + bytes([ii]))
-            assert ii - 127 == params["precision"]
-            assert params["is_signed"]
-
-        # Unsigned
-        for ii in range(7, 16):
-            params = get_j2k_parameters(base + bytes([ii]))
-            assert ii + 1 == params["precision"]
-            assert not params["is_signed"]
-
-    def test_not_j2k(self):
-        """Test result when no JPEG2K SOF marker present"""
-        base = b"\xff\x4e\xff\x51" + b"\x00" * 38
-        assert {} == get_j2k_parameters(base + b"\x8F")
-
-    def test_no_siz(self):
-        """Test result when no SIZ box present"""
-        base = b"\xff\x4f\xff\x52" + b"\x00" * 38
-        assert {} == get_j2k_parameters(base + b"\x8F")
-
-    def test_short_bytestream(self):
-        """Test result when the bytestream is too short"""
-        assert {} == get_j2k_parameters(b"")
-        assert {} == get_j2k_parameters(b"\xff\x4f\xff\x51" + b"\x00" * 20)
-
-
-class TestGetNrFrames:
-    """Tests for get_nr_frames."""
-
-    def test_none(self):
-        """Test warning when (0028,0008) 'Number of Frames' has a value of
-        None"""
-        ds = Dataset()
-        ds.NumberOfFrames = None
-        msg = (
-            r"A value of None for \(0028,0008\) 'Number of Frames' is "
-            r"non-conformant. It's recommended that this value be "
-            r"changed to 1"
-        )
-        with pytest.warns(UserWarning, match=msg):
-            assert 1 == get_nr_frames(ds)
-
-    def test_zero(self):
-        """Test warning when (0028,0008) 'Number of Frames' has a value of 0"""
-        ds = Dataset()
-        ds.NumberOfFrames = 0
-        msg = (
-            r"A value of 0 for \(0028,0008\) 'Number of Frames' is "
-            r"non-conformant. It's recommended that this value be "
It's recommended that this value be " - r"changed to 1" - ) - with pytest.warns(UserWarning, match=msg): - assert 1 == get_nr_frames(ds) +def test_deprecation_warnings(): + msg = ( + "The 'pydicom.pixel_data_handlers' module will be removed in v4.0, " + "please use 'from pydicom.pixels import convert_color_space' instead" + ) + with pytest.warns(DeprecationWarning, match=msg): + from pydicom.pixel_data_handlers import convert_color_space - def test_missing(self): - """Test return value when (0028,0008) 'Number of Frames' does not - exist""" - ds = Dataset() - with assert_no_warning(): - assert 1 == get_nr_frames(ds) + with pytest.warns(DeprecationWarning, match=msg): + from pydicom.pixel_data_handlers.util import convert_color_space as x - def test_existing(self): - """Test return value when (0028,0008) 'Number of Frames' exists.""" - ds = Dataset() - ds.NumberOfFrames = random.randint(1, 10) - with assert_no_warning(): - assert ds.NumberOfFrames == get_nr_frames(ds) + msg = ( + "The 'pydicom.pixel_data_handlers' module will be removed in v4.0, " + "please use 'from pydicom.pixels.utils import expand_ybr422' instead" + ) + with pytest.warns(DeprecationWarning, match=msg): + from pydicom.pixel_data_handlers import expand_ybr422 + with pytest.warns(DeprecationWarning, match=msg): + from pydicom.pixel_data_handlers.util import expand_ybr422 as y -REFERENCE_PACK_UNPACK = [ - (b"", []), - (b"\x00", [0, 0, 0, 0, 0, 0, 0, 0]), - (b"\x01", [1, 0, 0, 0, 0, 0, 0, 0]), - (b"\x02", [0, 1, 0, 0, 0, 0, 0, 0]), - (b"\x04", [0, 0, 1, 0, 0, 0, 0, 0]), - (b"\x08", [0, 0, 0, 1, 0, 0, 0, 0]), - (b"\x10", [0, 0, 0, 0, 1, 0, 0, 0]), - (b"\x20", [0, 0, 0, 0, 0, 1, 0, 0]), - (b"\x40", [0, 0, 0, 0, 0, 0, 1, 0]), - (b"\x80", [0, 0, 0, 0, 0, 0, 0, 1]), - (b"\xAA", [0, 1, 0, 1, 0, 1, 0, 1]), - (b"\xF0", [0, 0, 0, 0, 1, 1, 1, 1]), - (b"\x0F", [1, 1, 1, 1, 0, 0, 0, 0]), - (b"\xFF", [1, 1, 1, 1, 1, 1, 1, 1]), - # | 1st byte | 2nd byte - (b"\x00\x00", [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), - (b"\x00\x01", [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]), - (b"\x00\x80", [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), - (b"\x00\xFF", [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]), - (b"\x01\x80", [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), - (b"\x80\x80", [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1]), - (b"\xFF\x80", [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1]), -] + msg = ( + "'dtype_corrected_for_endianness' is deprecated and will be " "removed in v4.0" + ) + with pytest.warns(DeprecationWarning, match=msg): + from pydicom.pixel_data_handlers.util import dtype_corrected_for_endianness -class TestUnpackBits: - """Tests for unpack_bits.""" +@pytest.fixture +def use_future(): + original = config._use_future + config._use_future = True + yield + config._use_future = original - @pytest.mark.skipif(not HAVE_NP, reason="Numpy is not available") - @pytest.mark.parametrize("src, output", REFERENCE_PACK_UNPACK) - def test_unpack_np(self, src, output): - """Test unpacking data using numpy.""" - assert np.array_equal(unpack_bits(src, as_array=True), np.asarray(output)) - as_bytes = pack(f"{len(output)}B", *output) - assert unpack_bits(src, as_array=False) == as_bytes +class TestFuture: + def test_imports_raise(self, use_future): + with pytest.raises(ImportError): + from pydicom.pixel_data_handlers import apply_color_lut as x - @pytest.mark.skipif(HAVE_NP, reason="Numpy is available") - @pytest.mark.parametrize("src, output", REFERENCE_PACK_UNPACK) - def test_unpack_bytes(self, src, output): - 
"""Test unpacking data without numpy.""" - as_bytes = pack(f"{len(output)}B", *output) - assert unpack_bits(src, as_array=False) == as_bytes + with pytest.raises(ImportError): + from pydicom.pixel_data_handlers.util import apply_color_lut - msg = r"unpack_bits\(\) requires NumPy if 'as_array = True'" - with pytest.raises(ValueError, match=msg): - unpack_bits(src, as_array=True) + with pytest.raises(ImportError): + from pydicom.pixel_data_handlers.util import apply_modality_lut + with pytest.raises(ImportError): + from pydicom.pixel_data_handlers.util import apply_voi_lut -REFERENCE_PACK_PARTIAL = [ - # | 1st byte | 2nd byte - (b"\x00\x40", [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), # 15-bits - (b"\x00\x20", [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), - (b"\x00\x10", [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), - (b"\x00\x08", [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), - (b"\x00\x04", [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), - (b"\x00\x02", [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), - (b"\x00\x01", [0, 0, 0, 0, 0, 0, 0, 0, 1]), # 9-bits - (b"\x80", [0, 0, 0, 0, 0, 0, 0, 1]), # 8-bits - (b"\x40", [0, 0, 0, 0, 0, 0, 1]), - (b"\x20", [0, 0, 0, 0, 0, 1]), - (b"\x10", [0, 0, 0, 0, 1]), - (b"\x08", [0, 0, 0, 1]), - (b"\x04", [0, 0, 1]), - (b"\x02", [0, 1]), - (b"\x01", [1]), - (b"", []), -] + with pytest.raises(ImportError): + from pydicom.pixel_data_handlers.util import apply_voi + with pytest.raises(ImportError): + from pydicom.pixel_data_handlers.util import apply_windowing -@pytest.mark.skipif(not HAVE_NP, reason="Numpy is not available") -class TestNumpy_PackBits: - """Tests for numpy_handler.pack_bits.""" + with pytest.raises(ImportError): + from pydicom.pixel_data_handlers.util import convert_color_space - @pytest.mark.parametrize("output, input", REFERENCE_PACK_UNPACK) - def test_pack(self, input, output): - """Test packing data.""" - assert output == pack_bits(np.asarray(input), pad=False) + with pytest.raises(ImportError): + from pydicom.pixel_data_handlers import expand_ybr422 as y - def test_non_binary_input(self): - """Test non-binary input raises exception.""" - with pytest.raises( - ValueError, match=r"Only binary arrays \(containing ones or" - ): - pack_bits(np.asarray([0, 0, 2, 0, 0, 0, 0, 0])) + with pytest.raises(ImportError): + from pydicom.pixel_data_handlers.util import expand_ybr422 - def test_ndarray_input(self): - """Test non 1D input gets ravelled.""" - arr = np.asarray( - [ - [0, 0, 0, 0, 0, 0, 0, 0], - [1, 0, 1, 0, 1, 0, 1, 0], - [1, 1, 1, 1, 1, 1, 1, 1], - ] - ) - assert (3, 8) == arr.shape - b = pack_bits(arr, pad=False) - assert b"\x00\x55\xff" == b + with pytest.raises(ImportError): + from pydicom.pixel_data_handlers.util import get_expected_length - def test_padding(self): - """Test odd length packed data is padded.""" - arr = np.asarray( - [ - [0, 0, 0, 0, 0, 0, 0, 0], - [1, 0, 1, 0, 1, 0, 1, 0], - [1, 1, 1, 1, 1, 1, 1, 1], - ] - ) - assert 3 == len(pack_bits(arr, pad=False)) - b = pack_bits(arr, pad=True) - assert 4 == len(b) - assert 0 == b[-1] + with pytest.raises(ImportError): + from pydicom.pixel_data_handlers.util import get_image_pixel_ids - @pytest.mark.parametrize("output, input", REFERENCE_PACK_PARTIAL) - def test_pack_partial(self, input, output): - """Test packing data that isn't a full byte long.""" - assert output == pack_bits(np.asarray(input), pad=False) + with pytest.raises(ImportError): + from pydicom.pixel_data_handlers.util import get_j2k_parameters - def test_functional(self): - """Test against a real dataset.""" - ds = dcmread(EXPL_1_1_3F) - arr = 
-        arr = arr.ravel()
-        assert ds.PixelData == pack_bits(arr)
+        with pytest.raises(ImportError):
+            from pydicom.pixel_data_handlers.util import get_nr_frames
 
+        with pytest.raises(ImportError):
+            from pydicom.pixel_data_handlers.util import pack_bits
 
-@pytest.mark.skipif(not HAVE_NP, reason="Numpy is not available")
-class TestExpandYBR422:
-    def test_8bit(self):
-        """Test 8-bit expansion."""
-        ds = dcmread(EXPL_8_3_1F_YBR422)
-        assert ds.PhotometricInterpretation == "YBR_FULL_422"
-        ref = ds.pixel_array
+        with pytest.raises(ImportError):
+            from pydicom.pixel_data_handlers.util import pixel_dtype
 
-        expanded = expand_ybr422(ds.PixelData, ds.BitsAllocated)
-        arr = np.frombuffer(expanded, dtype="u1")
-        assert np.array_equal(arr, ref.ravel())
+        with pytest.raises(ImportError):
+            from pydicom.pixel_data_handlers.util import reshape_pixel_array
 
-    def test_16bit(self):
-        """Test 16-bit expansion."""
-        # Have to make our own 16-bit data
-        ds = dcmread(EXPL_8_3_1F_YBR422)
-        ref = ds.pixel_array.astype("float32")
-        ref *= 65535 / 255
-        ref = ref.astype("u2")
-        # Subsample
-        # YY BB RR YY BB RR YY BB RR YY BB RR -> YY YY BB RR YY YY BB RR
-        src = bytearray(ref.tobytes())
-        del src[2::12]
-        del src[2::11]
-        del src[2::10]
-        del src[2::9]
+        with pytest.raises(ImportError):
+            from pydicom.pixel_data_handlers.util import unpack_bits
 
-        # Should be 2/3rds of the original number of bytes
-        nr_bytes = ds.Rows * ds.Columns * ds.SamplesPerPixel * 2
-        assert len(src) == nr_bytes * 2 // 3
-        arr = np.frombuffer(expand_ybr422(src, 16), "u2")
-        assert np.array_equal(arr, ref.ravel())
-        # Spot check values
-        arr = arr.reshape(100, 100, 3)
-        assert (19532, 21845, 65535) == tuple(arr[5, 50, :])
-        assert (42662, 27242, 49601) == tuple(arr[15, 50, :])
+        with pytest.raises(ImportError):
+            from pydicom.pixel_data_handlers.util import dtype_corrected_for_endianness
diff --git a/tests/test_numpy_pixel_data.py b/tests/test_numpy_pixel_data.py
index cb0c9a12b4..4ef32a529a 100644
--- a/tests/test_numpy_pixel_data.py
+++ b/tests/test_numpy_pixel_data.py
@@ -33,7 +33,7 @@
 from pydicom.data import get_testdata_file
 from pydicom.dataset import Dataset, FileMetaDataset
 from pydicom.filereader import dcmread
-from pydicom.pixel_data_handlers.util import convert_color_space
+from pydicom.pixels.processing import convert_color_space
 from pydicom.uid import (
     ImplicitVRLittleEndian,
     ExplicitVRLittleEndian,
diff --git a/tests/test_pillow_pixel_data.py b/tests/test_pillow_pixel_data.py
index 6ff980b44f..0277123ba4 100644
--- a/tests/test_pillow_pixel_data.py
+++ b/tests/test_pillow_pixel_data.py
@@ -6,7 +6,8 @@
 from pydicom.data import get_testdata_file
 from pydicom.encaps import get_frame, generate_frames, encapsulate
 from pydicom.filereader import dcmread
-from pydicom.pixel_data_handlers.util import convert_color_space, get_j2k_parameters
+from pydicom.pixels.processing import convert_color_space
+from pydicom.pixels.utils import get_j2k_parameters
 from pydicom.uid import (
     JPEGBaseline8Bit,
     JPEGLosslessSV1,
diff --git a/tests/test_pylibjpeg.py b/tests/test_pylibjpeg.py
index ab76a57553..c42022d44b 100644
--- a/tests/test_pylibjpeg.py
+++ b/tests/test_pylibjpeg.py
@@ -7,11 +7,8 @@
 from pydicom.data import get_testdata_file
 from pydicom.encaps import get_frame
 from pydicom.filereader import dcmread
-from pydicom.pixel_data_handlers.util import (
-    convert_color_space,
-    get_j2k_parameters,
-    get_expected_length,
-)
+from pydicom.pixels.processing import convert_color_space
+from pydicom.pixels.utils import get_j2k_parameters, get_expected_length
 from pydicom.uid import (
     JPEGBaseline8Bit,
     JPEGExtended12Bit,
diff --git a/tests/test_rle_pixel_data.py b/tests/test_rle_pixel_data.py
index 32bb35e110..009089c746 100644
--- a/tests/test_rle_pixel_data.py
+++ b/tests/test_rle_pixel_data.py
@@ -36,7 +36,7 @@
 try:
     import numpy as np
     from pydicom.pixel_data_handlers import numpy_handler as NP_HANDLER
-    from pydicom.pixel_data_handlers.util import reshape_pixel_array
+    from pydicom.pixels.utils import reshape_pixel_array
 
     HAVE_NP = NP_HANDLER.HAVE_NP
 except ImportError: