import abc
import functools
import itertools
import os
import pickle
import time
import weakref
from collections import defaultdict
from collections.abc import Sized
from stat import ST_CTIME
import numpy as np
from unyt.exceptions import UnitConversionError, UnitParseError
from yt._maintenance.deprecation import issue_deprecation_warning
from yt.config import ytcfg
from yt.data_objects.particle_filters import filter_registry
from yt.data_objects.particle_unions import ParticleUnion
from yt.data_objects.region_expression import RegionExpression
from yt.fields.derived_field import ValidateSpatial
from yt.fields.field_type_container import FieldTypeContainer
from yt.fields.fluid_fields import setup_gradient_fields
from yt.funcs import iter_fields, mylog, set_intersection, setdefaultattr
from yt.geometry.coordinates.api import (
CartesianCoordinateHandler,
CoordinateHandler,
CylindricalCoordinateHandler,
GeographicCoordinateHandler,
InternalGeographicCoordinateHandler,
PolarCoordinateHandler,
SpectralCubeCoordinateHandler,
SphericalCoordinateHandler,
)
from yt.units import UnitContainer, _wrap_display_ytarray, dimensions
from yt.units.dimensions import current_mks
from yt.units.unit_object import Unit, define_unit
from yt.units.unit_registry import UnitRegistry
from yt.units.unit_systems import create_code_unit_system, unit_system_registry
from yt.units.yt_array import YTArray, YTQuantity
from yt.utilities.cosmology import Cosmology
from yt.utilities.exceptions import (
YTFieldNotFound,
YTGeometryNotSupported,
YTIllDefinedParticleFilter,
YTObjectNotImplemented,
)
from yt.utilities.lib.fnv_hash import fnv_hash
from yt.utilities.minimal_representation import MinimalDataset
from yt.utilities.object_registries import data_object_registry, output_type_registry
from yt.utilities.parallel_tools.parallel_analysis_interface import parallel_root_only
from yt.utilities.parameter_file_storage import NoParameterShelf, ParameterFileStore
# We want to support the movie format in the future.
# When such a thing comes to pass, I'll move all the stuff that is constant up
# to here, and then have it instantiate EnzoDatasets as appropriate.
_cached_datasets = weakref.WeakValueDictionary()
_ds_store = ParameterFileStore()
def _unsupported_object(ds, obj_name):
def _raise_unsupp(*args, **kwargs):
raise YTObjectNotImplemented(ds, obj_name)
return _raise_unsupp
class MutableAttribute:
"""A descriptor for mutable data"""
def __init__(self, display_array=False):
self.data = weakref.WeakKeyDictionary()
self.display_array = display_array
def __get__(self, instance, owner):
if instance is None:
return None
ret = self.data.get(instance, None)
try:
ret = ret.copy()
except AttributeError:
pass
if self.display_array:
try:
ret._ipython_display_ = functools.partial(_wrap_display_ytarray, ret)
# This will error out if the items have yet to be turned into
# YTArrays, in which case we just let it go.
except AttributeError:
pass
return ret
def __set__(self, instance, value):
self.data[instance] = value
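# Descriptor sketch (illustrative): values are stored per instance in a
# WeakKeyDictionary and copied on access, so mutating the returned object
# never changes the stored value:
#     class Foo:
#         bar = MutableAttribute()
#     f = Foo()
#     f.bar = np.array([1.0, 2.0])
#     f.bar[0] = 99.0  # mutates a copy only
#     assert f.bar[0] == 1.0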
def requires_index(attr_name):
@property
def ireq(self):
self.index
# By now it should have been set
attr = self.__dict__[attr_name]
return attr
@ireq.setter
def ireq(self, value):
self.__dict__[attr_name] = value
return ireq
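# Lazy-attribute sketch: Dataset declares, for instance,
#     derived_field_list = requires_index("derived_field_list")
# so that the first access to ds.derived_field_list triggers index
# construction, which is expected to populate the underlying attribute.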
class Dataset(abc.ABC):
default_fluid_type = "gas"
default_field = ("gas", "density")
fluid_types = ("gas", "deposit", "index")
particle_types = ("io",) # By default we have an 'all'
particle_types_raw = ("io",)
geometry = "cartesian"
coordinates = None
storage_filename = None
particle_unions = None
known_filters = None
_index_class = None
field_units = None
derived_field_list = requires_index("derived_field_list")
fields = requires_index("fields")
_instantiated = False
_unique_identifier = None
_particle_type_counts = None
_proj_type = "quad_proj"
_ionization_label_format = "roman_numeral"
_determined_fields = None
fields_detected = False
# these are set in self._parse_parameter_file()
domain_left_edge = MutableAttribute(True)
domain_right_edge = MutableAttribute(True)
domain_dimensions = MutableAttribute(True)
# the point in index space that "domain_left_edge" maps to is not
# necessarily (0, 0, 0)
domain_offset = np.zeros(3, dtype="int64")
_periodicity = MutableAttribute()
_force_periodicity = False
# these are set in self._set_derived_attrs()
domain_width = MutableAttribute(True)
domain_center = MutableAttribute(True)
def __new__(cls, filename=None, *args, **kwargs):
if not isinstance(filename, str):
obj = object.__new__(cls)
# The Stream frontend uses a StreamHandler object to pass metadata
# to __init__.
is_stream = hasattr(filename, "get_fields") and hasattr(
filename, "get_particle_type"
)
if not is_stream:
obj.__init__(filename, *args, **kwargs)
return obj
apath = os.path.abspath(filename)
cache_key = (apath, pickle.dumps(args), pickle.dumps(kwargs))
if ytcfg.get("yt", "skip_dataset_cache"):
obj = object.__new__(cls)
elif cache_key not in _cached_datasets:
obj = object.__new__(cls)
if not obj._skip_cache:
_cached_datasets[cache_key] = obj
else:
obj = _cached_datasets[cache_key]
return obj
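# Caching sketch (illustrative filename): because the cache key combines the
# absolute path with the pickled args/kwargs, loading the same file twice
# with identical options returns the same instance:
#     ds1 = yt.load("snapshot_000.hdf5")
#     ds2 = yt.load("snapshot_000.hdf5")
#     assert ds1 is ds2  # unless yt's skip_dataset_cache option is set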
def __init_subclass__(cls, *args, **kwargs):
super().__init_subclass__(*args, **kwargs)
output_type_registry[cls.__name__] = cls
mylog.debug("Registering: %s as %s", cls.__name__, cls)
def __init__(
self,
filename,
dataset_type=None,
file_style=None,
units_override=None,
unit_system="cgs",
default_species_fields=None,
):
"""
Base class for generating new output types. Principally consists of
a *filename* and a *dataset_type* which will be passed on to children.
"""
# We return early and do NOT initialize a second time if this file has
# already been initialized.
if self._instantiated:
return
self.dataset_type = dataset_type
self.file_style = file_style
self.conversion_factors = {}
self.parameters = {}
self.region_expression = self.r = RegionExpression(self)
self.known_filters = self.known_filters or {}
self.particle_unions = self.particle_unions or {}
self.field_units = self.field_units or {}
self._determined_fields = {}
self.units_override = self.__class__._sanitize_units_override(units_override)
self.default_species_fields = default_species_fields
# path stuff
self.parameter_filename = str(filename)
self.basename = os.path.basename(filename)
self.directory = os.path.expanduser(os.path.dirname(filename))
self.fullpath = os.path.abspath(self.directory)
self.backup_filename = self.parameter_filename + "_backup.gdf"
self.read_from_backup = False
if os.path.exists(self.backup_filename):
self.read_from_backup = True
if len(self.directory) == 0:
self.directory = "."
# to get the timing right, do this before the heavy lifting
self._instantiated = time.time()
self.no_cgs_equiv_length = False
if unit_system == "code":
# create a fake MKS unit system, which we will override later, to
# avoid the chicken-and-egg problem: the unit registry needs a unit
# system, but code units need a unit registry to be defined on
used_unit_system = "mks"
else:
used_unit_system = unit_system
self._create_unit_registry(used_unit_system)
self._parse_parameter_file()
self.set_units()
self.setup_cosmology()
self._assign_unit_system(unit_system)
self._setup_coordinate_handler()
self.print_key_parameters()
self._set_derived_attrs()
# Because we need an instantiated class to check the ds's existence in
# the cache, we move that check to here from __new__. This avoids
# double-instantiation.
# PR 3124: _set_derived_attrs() can change the hash, check store here
try:
_ds_store.check_ds(self)
except NoParameterShelf:
pass
self._setup_classes()
@property
def unique_identifier(self):
if self._unique_identifier is None:
self._unique_identifier = int(os.stat(self.parameter_filename)[ST_CTIME])
name_as_bytes = bytearray(map(ord, self.parameter_filename))
self._unique_identifier += fnv_hash(name_as_bytes)
return self._unique_identifier
@unique_identifier.setter
def unique_identifier(self, value):
self._unique_identifier = value
@property
def periodicity(self):
if self._force_periodicity:
return (True, True, True)
return self._periodicity
@periodicity.setter
def periodicity(self, val):
# remove this setter to break backward compatibility
issue_deprecation_warning(
"Dataset.periodicity should not be overridden manually. "
"In the future, this will become an error. "
"Use `Dataset.force_periodicity` instead.",
since="4.0.0",
removal="4.1.0",
)
err_msg = f"Expected a 3-element boolean tuple, received `{val}`."
if not isinstance(val, Sized):
raise TypeError(err_msg)
if len(val) != 3:
raise ValueError(err_msg)
if any(not isinstance(p, (bool, np.bool_)) for p in val):
raise TypeError(err_msg)
self._periodicity = tuple(bool(p) for p in val)
def force_periodicity(self, val=True):
"""
Override box periodicity to (True, True, True).
Use ds.force_periodicity(False) to use the actual box periodicity.
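
Examples
--------
A minimal sketch; ``ds`` is any loaded dataset:
>>> ds.force_periodicity()       # treat the domain as fully periodic
>>> ds.periodicity
(True, True, True)
>>> ds.force_periodicity(False)  # restore the dataset's own flags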
"""
# This is a user-facing method that embraces a long-standing
# workaround in yt user codes.
if not isinstance(val, bool):
raise TypeError("force_periodicity expected a boolean.")
self._force_periodicity = val
# abstract methods require implementation in subclasses
@classmethod
@abc.abstractmethod
def _is_valid(cls, filename, *args, **kwargs):
# A heuristic test to determine if the data format can be interpreted
# with the present frontend
return False
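# Frontend sketch (hypothetical extension check): a concrete subclass
# usually inspects the file cheaply and returns True only when it
# recognizes the format:
#     @classmethod
#     def _is_valid(cls, filename, *args, **kwargs):
#         return str(filename).endswith(".my_format")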
@abc.abstractmethod
def _parse_parameter_file(self):
# set up various attributes from self.parameter_filename
# for a full description of what is required here see
# yt.frontends._skeleton.SkeletonDataset
pass
@abc.abstractmethod
def _set_code_unit_attributes(self):
# set up code-units to physical units normalization factors
# for a full description of what is required here see
# yt.frontends._skeleton.SkeletonDataset
pass
def _set_derived_attrs(self):
if self.domain_left_edge is None or self.domain_right_edge is None:
self.domain_center = np.zeros(3)
self.domain_width = np.zeros(3)
else:
self.domain_center = 0.5 * (self.domain_right_edge + self.domain_left_edge)
self.domain_width = self.domain_right_edge - self.domain_left_edge
if not isinstance(self.current_time, YTQuantity):
self.current_time = self.quan(self.current_time, "code_time")
for attr in ("center", "width", "left_edge", "right_edge"):
n = f"domain_{attr}"
v = getattr(self, n)
if not isinstance(v, YTArray) and v is not None:
# Note that we don't add on _ipython_display_ here because
# everything is stored inside a MutableAttribute.
v = self.arr(v, "code_length")
setattr(self, n, v)
def __reduce__(self):
args = (self._hash(),)
return (_reconstruct_ds, args)
def __repr__(self):
return f"{self.__class__.__name__}: {self.parameter_filename}"
def __str__(self):
return self.basename
def _hash(self):
s = f"{self.basename};{self.current_time};{self.unique_identifier}"
try:
import hashlib
return hashlib.md5(s.encode("utf-8")).hexdigest()
except ImportError:
return s.replace(";", "*")
_checksum = None
@property
def checksum(self):
"""
Computes md5 sum of a dataset.
Note: Currently this property is unable to determine a complete set of
files that are a part of a given dataset. As a first approximation, the
checksum of :py:attr:`~parameter_file` is calculated. In case
:py:attr:`~parameter_file` is a directory, the checksum of all files inside
the directory is calculated.
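
Examples
--------
A minimal sketch:
>>> digest = ds.checksum  # md5 hex digest, or "notafile"/"nohashlib" fallbacks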
"""
if self._checksum is None:
try:
import hashlib
except ImportError:
self._checksum = "nohashlib"
return self._checksum
def generate_file_md5(m, filename, blocksize=2 ** 20):
with open(filename, "rb") as f:
while True:
buf = f.read(blocksize)
if not buf:
break
m.update(buf)
m = hashlib.md5()
if os.path.isdir(self.parameter_filename):
for root, _, files in os.walk(self.parameter_filename):
for fname in files:
fname = os.path.join(root, fname)
generate_file_md5(m, fname)
elif os.path.isfile(self.parameter_filename):
generate_file_md5(m, self.parameter_filename)
else:
m = "notafile"
if hasattr(m, "hexdigest"):
m = m.hexdigest()
self._checksum = m
return self._checksum
@property
def _mrep(self):
return MinimalDataset(self)
@property
def _skip_cache(self):
return False
@classmethod
def _guess_candidates(cls, base, directories, files):
"""
This is a class method that accepts a directory (base), a list of files
in that directory, and a list of subdirectories. It should return a
list of filenames (defined relative to the supplied directory) and a
boolean as to whether or not further directories should be recursed.
This function doesn't need to catch all possibilities, nor does it need
to filter possibilities.
"""
return [], True
def close(self):
pass
def __getitem__(self, key):
"""Returns units, parameters, or conversion_factors in that order."""
return self.parameters[key]
def __iter__(self):
yield from self.parameters
def get_smallest_appropriate_unit(
self, v, quantity="distance", return_quantity=False
):
"""
Return, as a string, the largest whole unit that is smaller than the
YTQuantity passed in.
The quantity keyword can be equal to `distance` or `time`. In the
case of distance, the units are: 'Mpc', 'kpc', 'pc', 'au', 'rsun',
'km', etc. For time, the units are: 'Myr', 'kyr', 'yr', 'day', 'hr',
's', 'ms', etc.
If return_quantity is set to True, it finds the largest YTQuantity
object with a whole unit and a power of ten as the coefficient, and it
returns this YTQuantity.
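
Examples
--------
A sketch of both return modes; exact results depend on the dataset's
unit registry:
>>> ds.get_smallest_appropriate_unit(ds.quan(3.0, "Mpc"))
'Mpc'
>>> uq = ds.get_smallest_appropriate_unit(
...     ds.quan(3.0, "Mpc"), return_quantity=True
... )  # largest power-of-ten quantity below 3 Mpc, i.e. 1 Mpc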
"""
good_u = None
if quantity == "distance":
unit_list = [
"Ppc",
"Tpc",
"Gpc",
"Mpc",
"kpc",
"pc",
"au",
"rsun",
"km",
"cm",
"um",
"nm",
"pm",
]
elif quantity == "time":
unit_list = [
"Yyr",
"Zyr",
"Eyr",
"Pyr",
"Tyr",
"Gyr",
"Myr",
"kyr",
"yr",
"day",
"hr",
"s",
"ms",
"us",
"ns",
"ps",
"fs",
]
else:
raise ValueError(
"Specified quantity must be equal to 'distance' or 'time'."
)
for unit in unit_list:
uq = self.quan(1.0, unit)
if uq <= v:
good_u = unit
break
if good_u is None and quantity == "distance":
good_u = "cm"
if good_u is None and quantity == "time":
good_u = "s"
if return_quantity:
unit_index = unit_list.index(good_u)
# This avoids indexing errors
if unit_index == 0:
return self.quan(1, unit_list[0])
# Number of orders of magnitude between unit and next one up
OOMs = np.ceil(
np.log10(
self.quan(1, unit_list[unit_index - 1])
/ self.quan(1, unit_list[unit_index])
)
)
# Backwards order of coefficients (e.g. [100, 10, 1])
coeffs = 10 ** np.arange(OOMs)[::-1]
for j in coeffs:
uq = self.quan(j, good_u)
if uq <= v:
return uq
else:
return good_u
def has_key(self, key):
"""
Check whether *key* is present in the dataset parameters. Returns a boolean.
"""
return key in self.parameters
_instantiated_index = None
@property
def index(self):
if self._instantiated_index is None:
self._instantiated_index = self._index_class(
self, dataset_type=self.dataset_type
)
# Now we do things that we need an instantiated index for
# ...first off, we create our field_info now.
oldsettings = np.geterr()
np.seterr(all="ignore")
self.create_field_info()
np.seterr(**oldsettings)
return self._instantiated_index
@parallel_root_only
def print_key_parameters(self):
for a in [
"current_time",
"domain_dimensions",
"domain_left_edge",
"domain_right_edge",
"cosmological_simulation",
]:
if not hasattr(self, a):
mylog.error("Missing %s in parameter file definition!", a)
continue
v = getattr(self, a)
mylog.info("Parameters: %-25s = %s", a, v)
if hasattr(self, "cosmological_simulation") and self.cosmological_simulation:
for a in [
"current_redshift",
"omega_lambda",
"omega_matter",
"omega_radiation",
"hubble_constant",
]:
if not hasattr(self, a):
mylog.error("Missing %s in parameter file definition!", a)
continue
v = getattr(self, a)
mylog.info("Parameters: %-25s = %s", a, v)
@parallel_root_only
def print_stats(self):
self.index.print_stats()
@property
def field_list(self):
return self.index.field_list
def create_field_info(self):
self.field_dependencies = {}
self.derived_field_list = []
self.filtered_particle_types = []
self.field_info = self._field_info_class(self, self.field_list)
self.coordinates.setup_fields(self.field_info)
self.field_info.setup_fluid_fields()
for ptype in self.particle_types:
self.field_info.setup_particle_fields(ptype)
self.field_info.setup_fluid_index_fields()
if "all" not in self.particle_types:
mylog.debug("Creating Particle Union 'all'")
pu = ParticleUnion("all", list(self.particle_types_raw))
nfields = self.add_particle_union(pu)
if nfields == 0:
mylog.debug("zero common fields: skipping particle union 'all'")
if "nbody" not in self.particle_types:
mylog.debug("Creating Particle Union 'nbody'")
ptypes = list(self.particle_types_raw)
if hasattr(self, "_sph_ptypes"):
for sph_ptype in self._sph_ptypes:
if sph_ptype in ptypes:
ptypes.remove(sph_ptype)
if ptypes:
nbody_ptypes = []
for ptype in ptypes:
if (ptype, "particle_mass") in self.field_info:
nbody_ptypes.append(ptype)
pu = ParticleUnion("nbody", nbody_ptypes)
nfields = self.add_particle_union(pu)
if nfields == 0:
mylog.debug("zero common fields, skipping particle union 'nbody'")
self.field_info.setup_extra_union_fields()
mylog.debug("Loading field plugins.")
self.field_info.load_all_plugins(self.default_fluid_type)
deps, unloaded = self.field_info.check_derived_fields()
self.field_dependencies.update(deps)
self.fields = FieldTypeContainer(self)
self.index.field_list = sorted(self.field_list)
# Now that we've detected the fields, set this flag so that
# deprecated fields will be logged if they are used
self.fields_detected = True
self._last_freq = (None, None)
def set_field_label_format(self, format_property, value):
"""
Set format properties for how fields will be written out.

Parameters
----------
format_property : str
    name of the property to set
value : str
    the value to assign to that format_property
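
Examples
--------
The only property recognized at present is "ionization_label":
>>> ds.set_field_label_format("ionization_label", "plus_minus")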
"""
available_formats = {"ionization_label": ("plus_minus", "roman_numeral")}
if format_property in available_formats:
if value in available_formats[format_property]:
setattr(self, f"_{format_property}_format", value)
else:
raise ValueError(
"{} not an acceptable value for format_property "
"{}. Choices are {}.".format(
value, format_property, available_formats[format_property]
)
)
else:
raise ValueError(
"{} not a recognized format_property. Available "
"properties are: {}".format(
format_property, list(available_formats.keys())
)
)
def setup_deprecated_fields(self):
from yt.fields.field_aliases import _field_name_aliases
added = []
for old_name, new_name in _field_name_aliases:
try:
fi = self._get_field_info(new_name)
except YTFieldNotFound:
continue
self.field_info.alias(("gas", old_name), fi.name)
added.append(("gas", old_name))
self.field_info.find_dependencies(added)
def _setup_coordinate_handler(self):
kwargs = {}
if isinstance(self.geometry, tuple):
self.geometry, ordering = self.geometry
kwargs["ordering"] = ordering
if isinstance(self.geometry, CoordinateHandler):
# I kind of dislike this. The geometry field should always be a
# string, but the way we're set up with subclassing, we can't
# mandate that quite the way I'd like.
self.coordinates = self.geometry
return
elif callable(self.geometry):
cls = self.geometry
elif self.geometry == "cartesian":
cls = CartesianCoordinateHandler
elif self.geometry == "cylindrical":
cls = CylindricalCoordinateHandler
elif self.geometry == "polar":
cls = PolarCoordinateHandler
elif self.geometry == "spherical":
cls = SphericalCoordinateHandler
self.no_cgs_equiv_length = True
elif self.geometry == "geographic":
cls = GeographicCoordinateHandler
self.no_cgs_equiv_length = True
elif self.geometry == "internal_geographic":
cls = InternalGeographicCoordinateHandler
self.no_cgs_equiv_length = True
elif self.geometry == "spectral_cube":
cls = SpectralCubeCoordinateHandler
else:
raise YTGeometryNotSupported(self.geometry)
self.coordinates = cls(self, **kwargs)
def add_particle_union(self, union):
# No string lookups here, we need an actual union.
f = self.particle_fields_by_type
# find fields common to all particle types in the union
fields = set_intersection([f[s] for s in union if s in self.particle_types_raw])
if len(fields) == 0:
# don't create this union if no fields are common to all
# particle types
return len(fields)
for field in fields:
units = set()
for s in union:
# First we check our existing fields for units
funits = self._get_field_info(s, field).units
# Then we override with field_units settings.
funits = self.field_units.get((s, field), funits)
units.add(funits)
if len(units) == 1:
self.field_units[union.name, field] = list(units)[0]
self.particle_types += (union.name,)
self.particle_unions[union.name] = union
fields = [(union.name, field) for field in fields]
new_fields = [_ for _ in fields if _ not in self.field_list]
self.field_list.extend(new_fields)
new_field_info_fields = [
_ for _ in fields if _ not in self.field_info.field_list
]
self.field_info.field_list.extend(new_field_info_fields)
self.index.field_list = sorted(self.field_list)
# Give ourselves a chance to add them here, first, then...
# ...if we can't find them, we set them up as defaults.
new_fields = self._setup_particle_types([union.name])
self.field_info.find_dependencies(new_fields)
return len(new_fields)
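# Union sketch (hypothetical particle type names): a ParticleUnion groups
# raw particle types so that their common fields can be queried under a
# single name:
#     from yt.data_objects.particle_unions import ParticleUnion
#     pu = ParticleUnion("stars_and_dm", ["stars", "dm"])
#     ds.add_particle_union(pu)  # returns the number of fields added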
def add_particle_filter(self, filter):
"""Add particle filter to the dataset.
Add ``filter`` to the dataset and set up the relevant derived fields.
It will also add any ``filtered_type`` that the ``filter`` depends on.
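
Examples
--------
A minimal sketch; the filter body and the ("io", "age") field are
illustrative assumptions, not fields every frontend provides:
>>> from yt.data_objects.particle_filters import add_particle_filter
>>> def young_stars(pfilter, data):
...     return data[pfilter.filtered_type, "age"] < data.ds.quan(10, "Myr")
>>> add_particle_filter(
...     "young_stars", function=young_stars, filtered_type="io", requires=["age"]
... )
>>> ds.add_particle_filter("young_stars")
True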
"""
# This requires an index
self.index
# This is a dummy, which we set up to enable passthrough of "all"
# concatenation fields.
n = getattr(filter, "name", filter)
self.known_filters[n] = None
if isinstance(filter, str):
used = False
f = filter_registry.get(filter, None)
if f is None:
return False
used = self._setup_filtered_type(f)
if used:
filter = f
else:
used = self._setup_filtered_type(filter)
if not used:
self.known_filters.pop(n, None)
return False
self.known_filters[filter.name] = filter
return True
def _setup_filtered_type(self, filter):
# Check if the filtered_type of this filter is known,
# otherwise add it first if it is in the filter_registry
if filter.filtered_type not in self.known_filters.keys():
if filter.filtered_type in filter_registry:
add_success = self.add_particle_filter(filter.filtered_type)
if add_success:
mylog.info(
"Added filter dependency '%s' for '%s'",
filter.filtered_type,
filter.name,
)
if not filter.available(self.derived_field_list):
raise YTIllDefinedParticleFilter(
filter, filter.missing(self.derived_field_list)
)
fi = self.field_info
fd = self.field_dependencies
available = False
for fn in self.derived_field_list:
if fn[0] == filter.filtered_type:
# Now we can add this
available = True
self.derived_field_list.append((filter.name, fn[1]))
fi[filter.name, fn[1]] = filter.wrap_func(fn, fi[fn])
# Now we append the dependencies
fd[filter.name, fn[1]] = fd[fn]
if available:
if filter.name not in self.particle_types:
self.particle_types += (filter.name,)
if filter.name not in self.filtered_particle_types:
self.filtered_particle_types.append(filter.name)
if hasattr(self, "_sph_ptypes"):
if filter.filtered_type == self._sph_ptypes[0]:
mylog.warning(
"It appears that you are filtering on an SPH field "
"type. It is recommended to use 'gas' as the "
"filtered particle type in this case instead."
)
if filter.filtered_type in (self._sph_ptypes + ("gas",)):
self._sph_ptypes = self._sph_ptypes + (filter.name,)
new_fields = self._setup_particle_types([filter.name])
deps, _ = self.field_info.check_derived_fields(new_fields)
self.field_dependencies.update(deps)
return available
def _setup_particle_types(self, ptypes=None):
df = []
if ptypes is None:
ptypes = self.ds.particle_types_raw
for ptype in set(ptypes):
df += self._setup_particle_type(ptype)
return df
_last_freq = (None, None)
_last_finfo = None
def _get_field_info(self, ftype, fname=None):
field_info, is_ambiguous = self._get_field_info_helper(ftype, fname)
if is_ambiguous:
ft, fn = field_info.name
msg = (
f"The requested field name '{fn}' "
"is ambiguous and corresponds to any one of "
f"the following field types:\n {self.field_info._ambiguous_field_names[fn]}\n"
"Please specify the requested field as an explicit "
"tuple (ftype, fname).\n"
f'Defaulting to \'("{ft}", "{fn}")\'.'
)
issue_deprecation_warning(msg, since="4.0.0", removal="4.1.0")
return field_info
def _get_field_info_helper(self, ftype, fname=None):
self.index
# store the original inputs in case we need to raise an error
INPUT = ftype, fname
if fname is None:
try:
ftype, fname = ftype.name
except AttributeError:
ftype, fname = "unknown", ftype
# storing this condition before altering it
guessing_type = ftype == "unknown"
if guessing_type:
ftype = self._last_freq[0] or ftype
is_ambiguous = fname in self.field_info._ambiguous_field_names
else:
is_ambiguous = False
field = (ftype, fname)
if (
field == self._last_freq
and field not in self.field_info.field_aliases.values()
):
return self._last_finfo, is_ambiguous
if field in self.field_info:
self._last_freq = field
self._last_finfo = self.field_info[(ftype, fname)]
return self._last_finfo, is_ambiguous
try:
# Sometimes, if guessing_type == True, this will be switched for
# the type of field it is. So we look at the field type and
# determine if we need to change the type.
fi = self._last_finfo = self.field_info[fname]
if (
fi.sampling_type == "particle"
and self._last_freq[0] not in self.particle_types
):
field = "all", field[1]
elif (
not fi.sampling_type == "particle"
and self._last_freq[0] not in self.fluid_types
):
field = self.default_fluid_type, field[1]
self._last_freq = field
return self._last_finfo, is_ambiguous
except KeyError:
pass
# We also should check "all" for particles, which can show up if you're
# mixing deposition/gas fields with particle fields.
if guessing_type:
if hasattr(self, "_sph_ptype"):
to_guess = [self.default_fluid_type, "all"]
else:
to_guess = ["all", self.default_fluid_type]
to_guess += list(self.fluid_types) + list(self.particle_types)
for ftype in to_guess:
if (ftype, fname) in self.field_info:
self._last_freq = (ftype, fname)
self._last_finfo = self.field_info[(ftype, fname)]
return self._last_finfo, is_ambiguous
raise YTFieldNotFound(field=INPUT, ds=self)
def _setup_classes(self):
# Called by subclass
self.object_types = []
self.objects = []
self.plots = []
for name, cls in sorted(data_object_registry.items()):
if name in self._index_class._unsupported_objects:
setattr(self, name, _unsupported_object(self, name))
continue
self._add_object_class(name, cls)
self.object_types.sort()
def _add_object_class(self, name, base):
# skip projection data objects that don't make sense
# for this type of data
if "proj" in name and name != self._proj_type:
return
elif "proj" in name:
name = "proj"
self.object_types.append(name)
obj = functools.partial(base, ds=weakref.proxy(self))
obj.__doc__ = base.__doc__
setattr(self, name, obj)
def _find_extremum(self, field, ext, source=None, to_array=True):
"""
Find the extremum value of a field in a data object (source) and its position.
Parameters
----------
field : str or tuple(str, str)
    the field to search
ext : str
    'min' or 'max', the extremum to select
source : a yt data object, optional
    the region to search; defaults to all_data()
to_array : bool
    selects the return type of the coordinates (see Returns)
Returns
-------
val, coords
val: unyt.unyt_quantity
extremum value detected
coords: unyt.unyt_array or list(unyt.unyt_quantity)
Conversion to a single unyt_array object is only possible for coordinate
systems with homogeneous dimensions across axes (i.e. cartesian).
"""
ext = ext.lower()
if source is None:
source = self.all_data()
method = {
"min": source.quantities.min_location,
"max": source.quantities.max_location,
}[ext]
val, x1, x2, x3 = method(field)
coords = [x1, x2, x3]
mylog.info("%s value is %0.5e at %0.16f %0.16f %0.16f", ext, val, *coords)
if to_array:
if any(x.units.is_dimensionless for x in coords):
mylog.warning(
"dataset `%s` has angular coordinates. "
"Use 'to_array=False' to preserve "
"dimensionality in each coordinate.",
str(self),
)
# force conversion to length
alt_coords = []
for x in coords:
alt_coords.append(
self.quan(x.v, "code_length")
if x.units.is_dimensionless
else x.to("code_length")
)
coords = self.arr(alt_coords, dtype="float64").to("code_length")
return val, coords
def find_max(self, field, source=None, to_array=True):
"""
Returns (value, location) of the maximum of a given field.
This is a thin wrapper around ``_find_extremum``.
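
Examples
--------
A minimal sketch:
>>> value, location = ds.find_max(("gas", "density"))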
"""
mylog.debug("Searching for maximum value of %s", field)
return self._find_extremum(field, "max", source=source, to_array=to_array)
def find_min(self, field, source=None, to_array=True):
"""
Returns (value, location) for the minimum of a given field.
This is a thin wrapper around ``_find_extremum``.
"""
mylog.debug("Searching for minimum value of %s", field)
return self._find_extremum(field, "min", source=source, to_array=to_array)
def find_field_values_at_point(self, fields, coords):
"""